1 //===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// SI Implementation of TargetInstrInfo.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "SIInstrInfo.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUSubtarget.h"
17 #include "GCNHazardRecognizer.h"
18 #include "SIDefines.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "SIRegisterInfo.h"
21 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
22 #include "Utils/AMDGPUBaseInfo.h"
23 #include "llvm/ADT/APInt.h"
24 #include "llvm/ADT/ArrayRef.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/ADT/iterator_range.h"
28 #include "llvm/Analysis/AliasAnalysis.h"
29 #include "llvm/Analysis/MemoryLocation.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/CodeGen/MachineBasicBlock.h"
32 #include "llvm/CodeGen/MachineDominators.h"
33 #include "llvm/CodeGen/MachineFrameInfo.h"
34 #include "llvm/CodeGen/MachineFunction.h"
35 #include "llvm/CodeGen/MachineInstr.h"
36 #include "llvm/CodeGen/MachineInstrBuilder.h"
37 #include "llvm/CodeGen/MachineInstrBundle.h"
38 #include "llvm/CodeGen/MachineMemOperand.h"
39 #include "llvm/CodeGen/MachineOperand.h"
40 #include "llvm/CodeGen/MachineRegisterInfo.h"
41 #include "llvm/CodeGen/RegisterScavenging.h"
42 #include "llvm/CodeGen/ScheduleDAG.h"
43 #include "llvm/CodeGen/SelectionDAGNodes.h"
44 #include "llvm/CodeGen/TargetOpcodes.h"
45 #include "llvm/CodeGen/TargetRegisterInfo.h"
46 #include "llvm/IR/DebugLoc.h"
47 #include "llvm/IR/DiagnosticInfo.h"
48 #include "llvm/IR/Function.h"
49 #include "llvm/IR/InlineAsm.h"
50 #include "llvm/IR/LLVMContext.h"
51 #include "llvm/MC/MCInstrDesc.h"
52 #include "llvm/Support/Casting.h"
53 #include "llvm/Support/CommandLine.h"
54 #include "llvm/Support/Compiler.h"
55 #include "llvm/Support/ErrorHandling.h"
56 #include "llvm/Support/MachineValueType.h"
57 #include "llvm/Support/MathExtras.h"
58 #include "llvm/Target/TargetMachine.h"
59 #include <cassert>
60 #include <cstdint>
61 #include <iterator>
62 #include <utility>
63
64 using namespace llvm;
65
66 #define DEBUG_TYPE "si-instr-info"
67
68 #define GET_INSTRINFO_CTOR_DTOR
69 #include "AMDGPUGenInstrInfo.inc"
70
71 namespace llvm {
72 namespace AMDGPU {
73 #define GET_D16ImageDimIntrinsics_IMPL
74 #define GET_ImageDimIntrinsicTable_IMPL
75 #define GET_RsrcIntrinsics_IMPL
76 #include "AMDGPUGenSearchableTables.inc"
77 }
78 }
79
80
81 // Must be at least 4 to be able to branch over the minimum unconditional
82 // branch code. This is only to make it possible to write reasonably small
83 // tests for long branches.
84 static cl::opt<unsigned>
85 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
86 cl::desc("Restrict range of branch instructions (DEBUG)"));
87
88 static cl::opt<bool> Fix16BitCopies(
89 "amdgpu-fix-16-bit-physreg-copies",
90 cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
91 cl::init(true),
92 cl::ReallyHidden);
93
94 SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
95 : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
96 RI(ST), ST(ST) {
97 SchedModel.init(&ST);
98 }
99
100 //===----------------------------------------------------------------------===//
101 // TargetInstrInfo callbacks
102 //===----------------------------------------------------------------------===//
103
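/// Count \p Node's operands, ignoring any trailing glue operands; glue only
/// models scheduling dependencies, not real data inputs.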
104 static unsigned getNumOperandsNoGlue(SDNode *Node) {
105 unsigned N = Node->getNumOperands();
106 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
107 --N;
108 return N;
109 }
110
111 /// Returns true if both nodes have the same value for the given
112 /// operand \p Op, or if both nodes do not have this operand.
113 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
114 unsigned Opc0 = N0->getMachineOpcode();
115 unsigned Opc1 = N1->getMachineOpcode();
116
117 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
118 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);
119
120 if (Op0Idx == -1 && Op1Idx == -1)
121 return true;
122
123
124 if ((Op0Idx == -1 && Op1Idx != -1) ||
125 (Op1Idx == -1 && Op0Idx != -1))
126 return false;
127
128 // getNamedOperandIdx returns the index for the MachineInstr's operands,
129 // which includes the result as the first operand. We are indexing into the
130 // MachineSDNode's operands, so we need to skip the result operand to get
131 // the real index.
132 --Op0Idx;
133 --Op1Idx;
134
135 return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
136 }
137
138 bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
139 AliasAnalysis *AA) const {
140   // TODO: The generic check fails for VALU instructions that should be
141   // rematerializable due to implicit reads of exec. We really want all of the
142   // generic logic for this except for the implicit exec check.
143 switch (MI.getOpcode()) {
144 case AMDGPU::V_MOV_B32_e32:
145 case AMDGPU::V_MOV_B32_e64:
146 case AMDGPU::V_MOV_B64_PSEUDO:
147 case AMDGPU::V_ACCVGPR_READ_B32:
148 case AMDGPU::V_ACCVGPR_WRITE_B32:
149 // No implicit operands.
150 return MI.getNumOperands() == MI.getDesc().getNumOperands();
151 default:
152 return false;
153 }
154 }
155
156 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
157 int64_t &Offset0,
158 int64_t &Offset1) const {
159 if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
160 return false;
161
162 unsigned Opc0 = Load0->getMachineOpcode();
163 unsigned Opc1 = Load1->getMachineOpcode();
164
165 // Make sure both are actually loads.
166 if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
167 return false;
168
169 if (isDS(Opc0) && isDS(Opc1)) {
170
171 // FIXME: Handle this case:
172 if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
173 return false;
174
175 // Check base reg.
176 if (Load0->getOperand(0) != Load1->getOperand(0))
177 return false;
178
179 // Skip read2 / write2 variants for simplicity.
180     // TODO: We should report true if the used offsets are adjacent (excluding
181     // the st64 variants).
182 int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
183 int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
184 if (Offset0Idx == -1 || Offset1Idx == -1)
185 return false;
186
187     // XXX - be careful of dataless loads
188 // getNamedOperandIdx returns the index for MachineInstrs. Since they
189 // include the output in the operand list, but SDNodes don't, we need to
190     // subtract one from the index.
191 Offset0Idx -= get(Opc0).NumDefs;
192 Offset1Idx -= get(Opc1).NumDefs;
193 Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
194 Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
195 return true;
196 }
197
198 if (isSMRD(Opc0) && isSMRD(Opc1)) {
199 // Skip time and cache invalidation instructions.
200 if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
201 AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
202 return false;
203
204 assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));
205
206 // Check base reg.
207 if (Load0->getOperand(0) != Load1->getOperand(0))
208 return false;
209
210 const ConstantSDNode *Load0Offset =
211 dyn_cast<ConstantSDNode>(Load0->getOperand(1));
212 const ConstantSDNode *Load1Offset =
213 dyn_cast<ConstantSDNode>(Load1->getOperand(1));
214
215 if (!Load0Offset || !Load1Offset)
216 return false;
217
218 Offset0 = Load0Offset->getZExtValue();
219 Offset1 = Load1Offset->getZExtValue();
220 return true;
221 }
222
223 // MUBUF and MTBUF can access the same addresses.
224 if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
225
226 // MUBUF and MTBUF have vaddr at different indices.
227 if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
228 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
229 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
230 return false;
231
232 int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
233 int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
234
235 if (OffIdx0 == -1 || OffIdx1 == -1)
236 return false;
237
238 // getNamedOperandIdx returns the index for MachineInstrs. Since they
239 // include the output in the operand list, but SDNodes don't, we need to
240     // subtract one from the index.
241 OffIdx0 -= get(Opc0).NumDefs;
242 OffIdx1 -= get(Opc1).NumDefs;
243
244 SDValue Off0 = Load0->getOperand(OffIdx0);
245 SDValue Off1 = Load1->getOperand(OffIdx1);
246
247 // The offset might be a FrameIndexSDNode.
248 if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
249 return false;
250
251 Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
252 Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
253 return true;
254 }
255
256 return false;
257 }
258
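/// Identify the DS read2st64 / write2st64 opcodes, whose offset operands are
/// expressed in units of 64 elements rather than single elements.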
259 static bool isStride64(unsigned Opc) {
260 switch (Opc) {
261 case AMDGPU::DS_READ2ST64_B32:
262 case AMDGPU::DS_READ2ST64_B64:
263 case AMDGPU::DS_WRITE2ST64_B32:
264 case AMDGPU::DS_WRITE2ST64_B64:
265 return true;
266 default:
267 return false;
268 }
269 }
270
271 bool SIInstrInfo::getMemOperandsWithOffsetWidth(
272 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
273 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
274 const TargetRegisterInfo *TRI) const {
275 if (!LdSt.mayLoadOrStore())
276 return false;
277
278 unsigned Opc = LdSt.getOpcode();
279 OffsetIsScalable = false;
280 const MachineOperand *BaseOp, *OffsetOp;
281 int DataOpIdx;
282
283 if (isDS(LdSt)) {
284 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
285 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
286 if (OffsetOp) {
287 // Normal, single offset LDS instruction.
288 if (!BaseOp) {
289 // DS_CONSUME/DS_APPEND use M0 for the base address.
290 // TODO: find the implicit use operand for M0 and use that as BaseOp?
291 return false;
292 }
293 BaseOps.push_back(BaseOp);
294 Offset = OffsetOp->getImm();
295 // Get appropriate operand, and compute width accordingly.
296 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
297 if (DataOpIdx == -1)
298 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
299 Width = getOpSize(LdSt, DataOpIdx);
300 } else {
301 // The 2 offset instructions use offset0 and offset1 instead. We can treat
302 // these as a load with a single offset if the 2 offsets are consecutive.
303 // We will use this for some partially aligned loads.
304 const MachineOperand *Offset0Op =
305 getNamedOperand(LdSt, AMDGPU::OpName::offset0);
306 const MachineOperand *Offset1Op =
307 getNamedOperand(LdSt, AMDGPU::OpName::offset1);
308
309 unsigned Offset0 = Offset0Op->getImm();
310 unsigned Offset1 = Offset1Op->getImm();
311 if (Offset0 + 1 != Offset1)
312 return false;
313
314 // Each of these offsets is in element sized units, so we need to convert
315 // to bytes of the individual reads.
316
317 unsigned EltSize;
318 if (LdSt.mayLoad())
319 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
320 else {
321 assert(LdSt.mayStore());
322 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
323 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
324 }
325
326 if (isStride64(Opc))
327 EltSize *= 64;
328
329 BaseOps.push_back(BaseOp);
330 Offset = EltSize * Offset0;
331 // Get appropriate operand(s), and compute width accordingly.
332 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
333 if (DataOpIdx == -1) {
334 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
335 Width = getOpSize(LdSt, DataOpIdx);
336 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
337 Width += getOpSize(LdSt, DataOpIdx);
338 } else {
339 Width = getOpSize(LdSt, DataOpIdx);
340 }
341 }
342 return true;
343 }
344
345 if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
346 const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
347 if (SOffset && SOffset->isReg()) {
348 // We can only handle this if it's a stack access, as any other resource
349 // would require reporting multiple base registers.
350 const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
351 if (AddrReg && !AddrReg->isFI())
352 return false;
353
354 const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
355 const SIMachineFunctionInfo *MFI
356 = LdSt.getParent()->getParent()->getInfo<SIMachineFunctionInfo>();
357 if (RSrc->getReg() != MFI->getScratchRSrcReg())
358 return false;
359
360 const MachineOperand *OffsetImm =
361 getNamedOperand(LdSt, AMDGPU::OpName::offset);
362 BaseOps.push_back(RSrc);
363 BaseOps.push_back(SOffset);
364 Offset = OffsetImm->getImm();
365 } else {
366 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
367 if (!BaseOp) // e.g. BUFFER_WBINVL1_VOL
368 return false;
369 BaseOps.push_back(BaseOp);
370
371 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
372 if (BaseOp)
373 BaseOps.push_back(BaseOp);
374
375 const MachineOperand *OffsetImm =
376 getNamedOperand(LdSt, AMDGPU::OpName::offset);
377 Offset = OffsetImm->getImm();
378 if (SOffset) // soffset can be an inline immediate.
379 Offset += SOffset->getImm();
380 }
381 // Get appropriate operand, and compute width accordingly.
382 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
383 if (DataOpIdx == -1)
384 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
385 Width = getOpSize(LdSt, DataOpIdx);
386 return true;
387 }
388
389 if (isMIMG(LdSt)) {
390 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
391 BaseOps.push_back(&LdSt.getOperand(SRsrcIdx));
392 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
393 if (VAddr0Idx >= 0) {
394 // GFX10 possible NSA encoding.
395 for (int I = VAddr0Idx; I < SRsrcIdx; ++I)
396 BaseOps.push_back(&LdSt.getOperand(I));
397 } else {
398 BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr));
399 }
400 Offset = 0;
401 // Get appropriate operand, and compute width accordingly.
402 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
403 Width = getOpSize(LdSt, DataOpIdx);
404 return true;
405 }
406
407 if (isSMRD(LdSt)) {
408 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
409 if (!BaseOp) // e.g. S_MEMTIME
410 return false;
411 BaseOps.push_back(BaseOp);
412 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
413 Offset = OffsetOp ? OffsetOp->getImm() : 0;
414 // Get appropriate operand, and compute width accordingly.
415 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
416 Width = getOpSize(LdSt, DataOpIdx);
417 return true;
418 }
419
420 if (isFLAT(LdSt)) {
421 // Instructions have either vaddr or saddr or both.
422 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
423 if (BaseOp)
424 BaseOps.push_back(BaseOp);
425 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
426 if (BaseOp)
427 BaseOps.push_back(BaseOp);
428 Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
429 // Get appropriate operand, and compute width accordingly.
430 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
431 if (DataOpIdx == -1)
432 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
433 Width = getOpSize(LdSt, DataOpIdx);
434 return true;
435 }
436
437 return false;
438 }
439
440 static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
441 ArrayRef<const MachineOperand *> BaseOps1,
442 const MachineInstr &MI2,
443 ArrayRef<const MachineOperand *> BaseOps2) {
444 // Only examine the first "base" operand of each instruction, on the
445 // assumption that it represents the real base address of the memory access.
446 // Other operands are typically offsets or indices from this base address.
447 if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
448 return true;
449
450 if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
451 return false;
452
453 auto MO1 = *MI1.memoperands_begin();
454 auto MO2 = *MI2.memoperands_begin();
455 if (MO1->getAddrSpace() != MO2->getAddrSpace())
456 return false;
457
458 auto Base1 = MO1->getValue();
459 auto Base2 = MO2->getValue();
460 if (!Base1 || !Base2)
461 return false;
462 const MachineFunction &MF = *MI1.getParent()->getParent();
463 const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
464 Base1 = GetUnderlyingObject(Base1, DL);
465 Base2 = GetUnderlyingObject(Base2, DL);
466
467 if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
468 return false;
469
470 return Base1 == Base2;
471 }
472
473 bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
474 ArrayRef<const MachineOperand *> BaseOps2,
475 unsigned NumLoads,
476 unsigned NumBytes) const {
477   // If the current pair of mem ops do not share the same base pointer, then
478   // they cannot be clustered.
479 assert(!BaseOps1.empty() && !BaseOps2.empty());
480 const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
481 const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
482 if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
483 return false;
484
485   // Compute the max cluster size based on the average number of bytes
486   // clustered so far, and decide from that whether this pair can be clustered.
487 assert((NumLoads > 0) && (NumBytes > 0) && (NumBytes >= NumLoads) &&
488 "Invalid NumLoads/NumBytes values");
489 unsigned MaxNumLoads;
490 if (NumBytes <= 4 * NumLoads) {
491 // Loads are dword or smaller (on average).
492 MaxNumLoads = 5;
493 } else {
494 // Loads are bigger than a dword (on average).
495 MaxNumLoads = 4;
496 }
497 return NumLoads <= MaxNumLoads;
498 }
499
500 // FIXME: This behaves strangely. If, for example, you have 32 load + stores,
501 // the first 16 loads will be interleaved with the stores, and the next 16 will
502 // be clustered as expected. It should really split into 2 16 store batches.
503 //
504 // Loads are clustered until this returns false, rather than trying to schedule
505 // groups of stores. This also means we have to deal with saying different
506 // address space loads should be clustered, and ones which might cause bank
507 // conflicts.
508 //
509 // This might be deprecated so it might not be worth that much effort to fix.
510 bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
511 int64_t Offset0, int64_t Offset1,
512 unsigned NumLoads) const {
513 assert(Offset1 > Offset0 &&
514 "Second offset should be larger than first offset!");
515   // If we have fewer than 16 loads in a row, and the offsets are within 64
516   // bytes, then schedule them together.
517
518 // A cacheline is 64 bytes (for global memory).
519 return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
520 }
521
522 static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
523 MachineBasicBlock::iterator MI,
524 const DebugLoc &DL, MCRegister DestReg,
525 MCRegister SrcReg, bool KillSrc,
526 const char *Msg = "illegal SGPR to VGPR copy") {
527 MachineFunction *MF = MBB.getParent();
528 DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error);
529 LLVMContext &C = MF->getFunction().getContext();
530 C.diagnose(IllegalCopy);
531
532 BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
533 .addReg(SrcReg, getKillRegState(KillSrc));
534 }
535
536 void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
537 MachineBasicBlock::iterator MI,
538 const DebugLoc &DL, MCRegister DestReg,
539 MCRegister SrcReg, bool KillSrc) const {
540 const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);
541
542 // FIXME: This is hack to resolve copies between 16 bit and 32 bit
543 // registers until all patterns are fixed.
544 if (Fix16BitCopies &&
545 ((RI.getRegSizeInBits(*RC) == 16) ^
546 (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) {
547 MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg;
548 MCRegister Super = RI.get32BitRegister(RegToFix);
549 assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix);
550 RegToFix = Super;
551
552 if (DestReg == SrcReg) {
553       // Insert an empty bundle since ExpandPostRA expects an instruction here.
554 BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
555 return;
556 }
557
558 RC = RI.getPhysRegClass(DestReg);
559 }
560
561 if (RC == &AMDGPU::VGPR_32RegClass) {
562 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
563 AMDGPU::SReg_32RegClass.contains(SrcReg) ||
564 AMDGPU::AGPR_32RegClass.contains(SrcReg));
565 unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
566 AMDGPU::V_ACCVGPR_READ_B32 : AMDGPU::V_MOV_B32_e32;
567 BuildMI(MBB, MI, DL, get(Opc), DestReg)
568 .addReg(SrcReg, getKillRegState(KillSrc));
569 return;
570 }
571
572 if (RC == &AMDGPU::SReg_32_XM0RegClass ||
573 RC == &AMDGPU::SReg_32RegClass) {
574 if (SrcReg == AMDGPU::SCC) {
575 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
576 .addImm(1)
577 .addImm(0);
578 return;
579 }
580
581 if (DestReg == AMDGPU::VCC_LO) {
582 if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
583 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
584 .addReg(SrcReg, getKillRegState(KillSrc));
585 } else {
586 // FIXME: Hack until VReg_1 removed.
587 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
588 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
589 .addImm(0)
590 .addReg(SrcReg, getKillRegState(KillSrc));
591 }
592
593 return;
594 }
595
596 if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
597 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
598 return;
599 }
600
601 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
602 .addReg(SrcReg, getKillRegState(KillSrc));
603 return;
604 }
605
606 if (RC == &AMDGPU::SReg_64RegClass) {
607 if (SrcReg == AMDGPU::SCC) {
608 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
609 .addImm(1)
610 .addImm(0);
611 return;
612 }
613
614 if (DestReg == AMDGPU::VCC) {
615 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
616 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
617 .addReg(SrcReg, getKillRegState(KillSrc));
618 } else {
619 // FIXME: Hack until VReg_1 removed.
620 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
621 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
622 .addImm(0)
623 .addReg(SrcReg, getKillRegState(KillSrc));
624 }
625
626 return;
627 }
628
629 if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
630 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
631 return;
632 }
633
634 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
635 .addReg(SrcReg, getKillRegState(KillSrc));
636 return;
637 }
638
639 if (DestReg == AMDGPU::SCC) {
640 // Copying 64-bit or 32-bit sources to SCC barely makes sense,
641 // but SelectionDAG emits such copies for i1 sources.
642 // TODO: Use S_BITCMP0_B32 instead and only consider the 0th bit.
643 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
644 SrcReg = RI.getSubReg(SrcReg, AMDGPU::sub0);
645 }
646 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
647
648 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
649 .addReg(SrcReg, getKillRegState(KillSrc))
650 .addImm(0);
651
652 return;
653 }
654
655 if (RC == &AMDGPU::AGPR_32RegClass) {
656 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
657 AMDGPU::SReg_32RegClass.contains(SrcReg) ||
658 AMDGPU::AGPR_32RegClass.contains(SrcReg));
659 if (!AMDGPU::VGPR_32RegClass.contains(SrcReg)) {
660 // First try to find defining accvgpr_write to avoid temporary registers.
661 for (auto Def = MI, E = MBB.begin(); Def != E; ) {
662 --Def;
663 if (!Def->definesRegister(SrcReg, &RI))
664 continue;
665 if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32)
666 break;
667
668 MachineOperand &DefOp = Def->getOperand(1);
669 assert(DefOp.isReg() || DefOp.isImm());
670
671 if (DefOp.isReg()) {
672           // Check that the register source operand is not clobbered before MI.
673           // Immediate operands are always safe to propagate.
674 bool SafeToPropagate = true;
675 for (auto I = Def; I != MI && SafeToPropagate; ++I)
676 if (I->modifiesRegister(DefOp.getReg(), &RI))
677 SafeToPropagate = false;
678
679 if (!SafeToPropagate)
680 break;
681
682 DefOp.setIsKill(false);
683 }
684
685 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
686 .add(DefOp);
687 return;
688 }
689
690 RegScavenger RS;
691 RS.enterBasicBlock(MBB);
692 RS.forward(MI);
693
694 // Ideally we want to have three registers for a long reg_sequence copy
695 // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
696 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
697 *MBB.getParent());
698
699 // Registers in the sequence are allocated contiguously so we can just
700 // use register number to pick one of three round-robin temps.
701 unsigned RegNo = DestReg % 3;
702 Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
703 if (!Tmp)
704 report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
705 RS.setRegUsed(Tmp);
706       // Only keep looping while free registers remain; otherwise the
707       // scavenger may report a fatal error when there is no emergency spill
708       // slot, or emit a spill using that slot.
709 while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) {
710 unsigned Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
711 if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
712 break;
713 Tmp = Tmp2;
714 RS.setRegUsed(Tmp);
715 }
716 copyPhysReg(MBB, MI, DL, Tmp, SrcReg, KillSrc);
717 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
718 .addReg(Tmp, RegState::Kill);
719 return;
720 }
721
722 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
723 .addReg(SrcReg, getKillRegState(KillSrc));
724 return;
725 }
726
727 if (RI.getRegSizeInBits(*RC) == 16) {
728 assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
729 AMDGPU::VGPR_HI16RegClass.contains(SrcReg) ||
730 AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
731 AMDGPU::AGPR_LO16RegClass.contains(SrcReg));
732
733 bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
734 bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
735 bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
736 bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
737 bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) ||
738 AMDGPU::SReg_LO16RegClass.contains(DestReg) ||
739 AMDGPU::AGPR_LO16RegClass.contains(DestReg);
740 bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
741 AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
742 AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
743 MCRegister NewDestReg = RI.get32BitRegister(DestReg);
744 MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);
745
746 if (IsSGPRDst) {
747 if (!IsSGPRSrc) {
748 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
749 return;
750 }
751
752 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
753 .addReg(NewSrcReg, getKillRegState(KillSrc));
754 return;
755 }
756
757 if (IsAGPRDst || IsAGPRSrc) {
758 if (!DstLow || !SrcLow) {
759 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
760 "Cannot use hi16 subreg with an AGPR!");
761 }
762
763 copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
764 return;
765 }
766
767 if (IsSGPRSrc && !ST.hasSDWAScalar()) {
768 if (!DstLow || !SrcLow) {
769 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
770 "Cannot use hi16 subreg on VI!");
771 }
772
773 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
774 .addReg(NewSrcReg, getKillRegState(KillSrc));
775 return;
776 }
777
778 auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
779 .addImm(0) // src0_modifiers
780 .addReg(NewSrcReg)
781 .addImm(0) // clamp
782 .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0
783 : AMDGPU::SDWA::SdwaSel::WORD_1)
784 .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE)
785 .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0
786 : AMDGPU::SDWA::SdwaSel::WORD_1)
787 .addReg(NewDestReg, RegState::Implicit | RegState::Undef);
788 // First implicit operand is $exec.
789 MIB->tieOperands(0, MIB->getNumOperands() - 1);
790 return;
791 }
792
793 unsigned EltSize = 4;
794 unsigned Opcode = AMDGPU::V_MOV_B32_e32;
795 if (RI.isSGPRClass(RC)) {
796 // TODO: Copy vec3/vec5 with s_mov_b64s then final s_mov_b32.
797 if (!(RI.getRegSizeInBits(*RC) % 64)) {
798 Opcode = AMDGPU::S_MOV_B64;
799 EltSize = 8;
800 } else {
801 Opcode = AMDGPU::S_MOV_B32;
802 EltSize = 4;
803 }
804
805 if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
806 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
807 return;
808 }
809 } else if (RI.hasAGPRs(RC)) {
810 Opcode = RI.hasVGPRs(RI.getPhysRegClass(SrcReg)) ?
811 AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::COPY;
812 } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
813 Opcode = AMDGPU::V_ACCVGPR_READ_B32;
814 }
815
816 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
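  // Copy sub-registers in the direction that avoids overwriting source
  // sub-registers that have not been read yet, in case the source and
  // destination register tuples overlap.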
817 bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
818
819 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
820 unsigned SubIdx;
821 if (Forward)
822 SubIdx = SubIndices[Idx];
823 else
824 SubIdx = SubIndices[SubIndices.size() - Idx - 1];
825
826 if (Opcode == TargetOpcode::COPY) {
827 copyPhysReg(MBB, MI, DL, RI.getSubReg(DestReg, SubIdx),
828 RI.getSubReg(SrcReg, SubIdx), KillSrc);
829 continue;
830 }
831
832 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
833 get(Opcode), RI.getSubReg(DestReg, SubIdx));
834
835 Builder.addReg(RI.getSubReg(SrcReg, SubIdx));
836
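    // On the first sub-copy, implicitly define the whole destination register
    // so it is treated as fully live from this point on.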
837 if (Idx == 0)
838 Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
839
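    // Only the final sub-copy may kill the full source register.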
840 bool UseKill = KillSrc && Idx == SubIndices.size() - 1;
841 Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
842 }
843 }
844
845 int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
846 int NewOpc;
847
848 // Try to map original to commuted opcode
849 NewOpc = AMDGPU::getCommuteRev(Opcode);
850 if (NewOpc != -1)
851 // Check if the commuted (REV) opcode exists on the target.
852 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
853
854 // Try to map commuted to original opcode
855 NewOpc = AMDGPU::getCommuteOrig(Opcode);
856 if (NewOpc != -1)
857 // Check if the original (non-REV) opcode exists on the target.
858 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
859
860 return Opcode;
861 }
862
863 void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
864 MachineBasicBlock::iterator MI,
865 const DebugLoc &DL, unsigned DestReg,
866 int64_t Value) const {
867 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
868 const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
869 if (RegClass == &AMDGPU::SReg_32RegClass ||
870 RegClass == &AMDGPU::SGPR_32RegClass ||
871 RegClass == &AMDGPU::SReg_32_XM0RegClass ||
872 RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
873 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
874 .addImm(Value);
875 return;
876 }
877
878 if (RegClass == &AMDGPU::SReg_64RegClass ||
879 RegClass == &AMDGPU::SGPR_64RegClass ||
880 RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
881 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
882 .addImm(Value);
883 return;
884 }
885
886 if (RegClass == &AMDGPU::VGPR_32RegClass) {
887 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
888 .addImm(Value);
889 return;
890 }
891 if (RegClass == &AMDGPU::VReg_64RegClass) {
892 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
893 .addImm(Value);
894 return;
895 }
896
897 unsigned EltSize = 4;
898 unsigned Opcode = AMDGPU::V_MOV_B32_e32;
899 if (RI.isSGPRClass(RegClass)) {
900 if (RI.getRegSizeInBits(*RegClass) > 32) {
901 Opcode = AMDGPU::S_MOV_B64;
902 EltSize = 8;
903 } else {
904 Opcode = AMDGPU::S_MOV_B32;
905 EltSize = 4;
906 }
907 }
908
909 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
910 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
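    // Only the lowest sub-register receives the immediate; the remaining
    // sub-registers are filled with zero.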
911 int64_t IdxValue = Idx == 0 ? Value : 0;
912
913 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
914 get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
915 Builder.addImm(IdxValue);
916 }
917 }
918
919 const TargetRegisterClass *
920 SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
921 return &AMDGPU::VGPR_32RegClass;
922 }
923
924 void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
925 MachineBasicBlock::iterator I,
926 const DebugLoc &DL, Register DstReg,
927 ArrayRef<MachineOperand> Cond,
928 Register TrueReg,
929 Register FalseReg) const {
930 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
931 MachineFunction *MF = MBB.getParent();
932 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
933 const TargetRegisterClass *BoolXExecRC =
934 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
935 assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
936 "Not a VGPR32 reg");
937
938 if (Cond.size() == 1) {
939 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
940 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
941 .add(Cond[0]);
942 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
943 .addImm(0)
944 .addReg(FalseReg)
945 .addImm(0)
946 .addReg(TrueReg)
947 .addReg(SReg);
948 } else if (Cond.size() == 2) {
949 assert(Cond[0].isImm() && "Cond[0] is not an immediate");
950 switch (Cond[0].getImm()) {
951 case SIInstrInfo::SCC_TRUE: {
952 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
953 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
954 : AMDGPU::S_CSELECT_B64), SReg)
955 .addImm(1)
956 .addImm(0);
957 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
958 .addImm(0)
959 .addReg(FalseReg)
960 .addImm(0)
961 .addReg(TrueReg)
962 .addReg(SReg);
963 break;
964 }
965 case SIInstrInfo::SCC_FALSE: {
966 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
967 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
968 : AMDGPU::S_CSELECT_B64), SReg)
969 .addImm(0)
970 .addImm(1);
971 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
972 .addImm(0)
973 .addReg(FalseReg)
974 .addImm(0)
975 .addReg(TrueReg)
976 .addReg(SReg);
977 break;
978 }
979 case SIInstrInfo::VCCNZ: {
980 MachineOperand RegOp = Cond[1];
981 RegOp.setImplicit(false);
982 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
983 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
984 .add(RegOp);
985 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
986 .addImm(0)
987 .addReg(FalseReg)
988 .addImm(0)
989 .addReg(TrueReg)
990 .addReg(SReg);
991 break;
992 }
993 case SIInstrInfo::VCCZ: {
994 MachineOperand RegOp = Cond[1];
995 RegOp.setImplicit(false);
996 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
997 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
998 .add(RegOp);
999 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1000 .addImm(0)
1001 .addReg(TrueReg)
1002 .addImm(0)
1003 .addReg(FalseReg)
1004 .addReg(SReg);
1005 break;
1006 }
1007 case SIInstrInfo::EXECNZ: {
1008 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1009 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
1010 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
1011 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
1012 .addImm(0);
1013 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1014 : AMDGPU::S_CSELECT_B64), SReg)
1015 .addImm(1)
1016 .addImm(0);
1017 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1018 .addImm(0)
1019 .addReg(FalseReg)
1020 .addImm(0)
1021 .addReg(TrueReg)
1022 .addReg(SReg);
1023 break;
1024 }
1025 case SIInstrInfo::EXECZ: {
1026 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1027 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
1028 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
1029 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
1030 .addImm(0);
1031 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1032 : AMDGPU::S_CSELECT_B64), SReg)
1033 .addImm(0)
1034 .addImm(1);
1035 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1036 .addImm(0)
1037 .addReg(FalseReg)
1038 .addImm(0)
1039 .addReg(TrueReg)
1040 .addReg(SReg);
1041 llvm_unreachable("Unhandled branch predicate EXECZ");
1042 break;
1043 }
1044 default:
1045 llvm_unreachable("invalid branch predicate");
1046 }
1047 } else {
1048 llvm_unreachable("Can only handle Cond size 1 or 2");
1049 }
1050 }
1051
1052 Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
1053 MachineBasicBlock::iterator I,
1054 const DebugLoc &DL,
1055 Register SrcReg, int Value) const {
1056 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1057 Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
1058 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
1059 .addImm(Value)
1060 .addReg(SrcReg);
1061
1062 return Reg;
1063 }
1064
1065 Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
1066 MachineBasicBlock::iterator I,
1067 const DebugLoc &DL,
1068 Register SrcReg, int Value) const {
1069 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1070 Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
1071 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
1072 .addImm(Value)
1073 .addReg(SrcReg);
1074
1075 return Reg;
1076 }
1077
1078 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
1079
1080 if (RI.hasAGPRs(DstRC))
1081 return AMDGPU::COPY;
1082 if (RI.getRegSizeInBits(*DstRC) == 32) {
1083 return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1084 } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
1085 return AMDGPU::S_MOV_B64;
1086 } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
1087 return AMDGPU::V_MOV_B64_PSEUDO;
1088 }
1089 return AMDGPU::COPY;
1090 }
1091
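/// Map a vector register size in bits to the smallest V_INDIRECT_REG_WRITE
/// pseudo whose VGPR tuple can hold it.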
1092 static unsigned getIndirectVGPRWritePseudoOpc(unsigned VecSize) {
1093 if (VecSize <= 32) // 4 bytes
1094 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V1;
1095 if (VecSize <= 64) // 8 bytes
1096 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V2;
1097 if (VecSize <= 96) // 12 bytes
1098 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V3;
1099 if (VecSize <= 128) // 16 bytes
1100 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V4;
1101 if (VecSize <= 160) // 20 bytes
1102 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V5;
1103 if (VecSize <= 256) // 32 bytes
1104 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V8;
1105 if (VecSize <= 512) // 64 bytes
1106 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V16;
1107 if (VecSize <= 1024) // 128 bytes
1108 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V32;
1109
1110 llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1111 }
1112
1113 static unsigned getIndirectSGPRWritePseudo32(unsigned VecSize) {
1114 if (VecSize <= 32) // 4 bytes
1115 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V1;
1116 if (VecSize <= 64) // 8 bytes
1117 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V2;
1118 if (VecSize <= 96) // 12 bytes
1119 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V3;
1120 if (VecSize <= 128) // 16 bytes
1121 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V4;
1122 if (VecSize <= 160) // 20 bytes
1123 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V5;
1124 if (VecSize <= 256) // 32 bytes
1125 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V8;
1126 if (VecSize <= 512) // 64 bytes
1127 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V16;
1128 if (VecSize <= 1024) // 128 bytes
1129 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V32;
1130
1131 llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1132 }
1133
1134 static unsigned getIndirectSGPRWritePseudo64(unsigned VecSize) {
1135 if (VecSize <= 64) // 8 bytes
1136 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V1;
1137 if (VecSize <= 128) // 16 bytes
1138 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V2;
1139 if (VecSize <= 256) // 32 bytes
1140 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V4;
1141 if (VecSize <= 512) // 64 bytes
1142 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V8;
1143 if (VecSize <= 1024) // 128 bytes
1144 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V16;
1145
1146 llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1147 }
1148
1149 const MCInstrDesc &SIInstrInfo::getIndirectRegWritePseudo(
1150 unsigned VecSize, unsigned EltSize, bool IsSGPR) const {
1151 if (IsSGPR) {
1152 switch (EltSize) {
1153 case 32:
1154 return get(getIndirectSGPRWritePseudo32(VecSize));
1155 case 64:
1156 return get(getIndirectSGPRWritePseudo64(VecSize));
1157 default:
1158 llvm_unreachable("invalid reg indexing elt size");
1159 }
1160 }
1161
1162 assert(EltSize == 32 && "invalid reg indexing elt size");
1163 return get(getIndirectVGPRWritePseudoOpc(VecSize));
1164 }
1165
1166 static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
1167 switch (Size) {
1168 case 4:
1169 return AMDGPU::SI_SPILL_S32_SAVE;
1170 case 8:
1171 return AMDGPU::SI_SPILL_S64_SAVE;
1172 case 12:
1173 return AMDGPU::SI_SPILL_S96_SAVE;
1174 case 16:
1175 return AMDGPU::SI_SPILL_S128_SAVE;
1176 case 20:
1177 return AMDGPU::SI_SPILL_S160_SAVE;
1178 case 24:
1179 return AMDGPU::SI_SPILL_S192_SAVE;
1180 case 32:
1181 return AMDGPU::SI_SPILL_S256_SAVE;
1182 case 64:
1183 return AMDGPU::SI_SPILL_S512_SAVE;
1184 case 128:
1185 return AMDGPU::SI_SPILL_S1024_SAVE;
1186 default:
1187 llvm_unreachable("unknown register size");
1188 }
1189 }
1190
1191 static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
1192 switch (Size) {
1193 case 4:
1194 return AMDGPU::SI_SPILL_V32_SAVE;
1195 case 8:
1196 return AMDGPU::SI_SPILL_V64_SAVE;
1197 case 12:
1198 return AMDGPU::SI_SPILL_V96_SAVE;
1199 case 16:
1200 return AMDGPU::SI_SPILL_V128_SAVE;
1201 case 20:
1202 return AMDGPU::SI_SPILL_V160_SAVE;
1203 case 24:
1204 return AMDGPU::SI_SPILL_V192_SAVE;
1205 case 32:
1206 return AMDGPU::SI_SPILL_V256_SAVE;
1207 case 64:
1208 return AMDGPU::SI_SPILL_V512_SAVE;
1209 case 128:
1210 return AMDGPU::SI_SPILL_V1024_SAVE;
1211 default:
1212 llvm_unreachable("unknown register size");
1213 }
1214 }
1215
1216 static unsigned getAGPRSpillSaveOpcode(unsigned Size) {
1217 switch (Size) {
1218 case 4:
1219 return AMDGPU::SI_SPILL_A32_SAVE;
1220 case 8:
1221 return AMDGPU::SI_SPILL_A64_SAVE;
1222 case 16:
1223 return AMDGPU::SI_SPILL_A128_SAVE;
1224 case 64:
1225 return AMDGPU::SI_SPILL_A512_SAVE;
1226 case 128:
1227 return AMDGPU::SI_SPILL_A1024_SAVE;
1228 default:
1229 llvm_unreachable("unknown register size");
1230 }
1231 }
1232
1233 void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
1234 MachineBasicBlock::iterator MI,
1235 Register SrcReg, bool isKill,
1236 int FrameIndex,
1237 const TargetRegisterClass *RC,
1238 const TargetRegisterInfo *TRI) const {
1239 MachineFunction *MF = MBB.getParent();
1240 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1241 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1242 const DebugLoc &DL = MBB.findDebugLoc(MI);
1243
1244 MachinePointerInfo PtrInfo
1245 = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
1246 MachineMemOperand *MMO = MF->getMachineMemOperand(
1247 PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
1248 FrameInfo.getObjectAlign(FrameIndex));
1249 unsigned SpillSize = TRI->getSpillSize(*RC);
1250
1251 if (RI.isSGPRClass(RC)) {
1252 MFI->setHasSpilledSGPRs();
1253 assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
1254 assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
1255 SrcReg != AMDGPU::EXEC && "exec should not be spilled");
1256
1257     // We are only allowed to create one new instruction when spilling
1258     // registers, so we need to use a pseudo instruction for spilling SGPRs.
1259 const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));
1260
1261     // The SGPR spill/restore instructions only work on numbered SGPRs, so we
1262     // need to make sure we are using the correct register class.
1263 if (Register::isVirtualRegister(SrcReg) && SpillSize == 4) {
1264 MachineRegisterInfo &MRI = MF->getRegInfo();
1265 MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
1266 }
1267
1268 BuildMI(MBB, MI, DL, OpDesc)
1269 .addReg(SrcReg, getKillRegState(isKill)) // data
1270 .addFrameIndex(FrameIndex) // addr
1271 .addMemOperand(MMO)
1272 .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
1273 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
1274 // Add the scratch resource registers as implicit uses because we may end up
1275 // needing them, and need to ensure that the reserved registers are
1276 // correctly handled.
1277 if (RI.spillSGPRToVGPR())
1278 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
1279 return;
1280 }
1281
1282 unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillSaveOpcode(SpillSize)
1283 : getVGPRSpillSaveOpcode(SpillSize);
1284 MFI->setHasSpilledVGPRs();
1285
1286 auto MIB = BuildMI(MBB, MI, DL, get(Opcode));
1287 if (RI.hasAGPRs(RC)) {
1288 MachineRegisterInfo &MRI = MF->getRegInfo();
1289 Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1290 MIB.addReg(Tmp, RegState::Define);
1291 }
1292 MIB.addReg(SrcReg, getKillRegState(isKill)) // data
1293 .addFrameIndex(FrameIndex) // addr
1294 .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
1295 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
1296 .addImm(0) // offset
1297 .addMemOperand(MMO);
1298 }
1299
1300 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
1301 switch (Size) {
1302 case 4:
1303 return AMDGPU::SI_SPILL_S32_RESTORE;
1304 case 8:
1305 return AMDGPU::SI_SPILL_S64_RESTORE;
1306 case 12:
1307 return AMDGPU::SI_SPILL_S96_RESTORE;
1308 case 16:
1309 return AMDGPU::SI_SPILL_S128_RESTORE;
1310 case 20:
1311 return AMDGPU::SI_SPILL_S160_RESTORE;
1312 case 24:
1313 return AMDGPU::SI_SPILL_S192_RESTORE;
1314 case 32:
1315 return AMDGPU::SI_SPILL_S256_RESTORE;
1316 case 64:
1317 return AMDGPU::SI_SPILL_S512_RESTORE;
1318 case 128:
1319 return AMDGPU::SI_SPILL_S1024_RESTORE;
1320 default:
1321 llvm_unreachable("unknown register size");
1322 }
1323 }
1324
1325 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
1326 switch (Size) {
1327 case 4:
1328 return AMDGPU::SI_SPILL_V32_RESTORE;
1329 case 8:
1330 return AMDGPU::SI_SPILL_V64_RESTORE;
1331 case 12:
1332 return AMDGPU::SI_SPILL_V96_RESTORE;
1333 case 16:
1334 return AMDGPU::SI_SPILL_V128_RESTORE;
1335 case 20:
1336 return AMDGPU::SI_SPILL_V160_RESTORE;
1337 case 24:
1338 return AMDGPU::SI_SPILL_V192_RESTORE;
1339 case 32:
1340 return AMDGPU::SI_SPILL_V256_RESTORE;
1341 case 64:
1342 return AMDGPU::SI_SPILL_V512_RESTORE;
1343 case 128:
1344 return AMDGPU::SI_SPILL_V1024_RESTORE;
1345 default:
1346 llvm_unreachable("unknown register size");
1347 }
1348 }
1349
1350 static unsigned getAGPRSpillRestoreOpcode(unsigned Size) {
1351 switch (Size) {
1352 case 4:
1353 return AMDGPU::SI_SPILL_A32_RESTORE;
1354 case 8:
1355 return AMDGPU::SI_SPILL_A64_RESTORE;
1356 case 16:
1357 return AMDGPU::SI_SPILL_A128_RESTORE;
1358 case 64:
1359 return AMDGPU::SI_SPILL_A512_RESTORE;
1360 case 128:
1361 return AMDGPU::SI_SPILL_A1024_RESTORE;
1362 default:
1363 llvm_unreachable("unknown register size");
1364 }
1365 }
1366
1367 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
1368 MachineBasicBlock::iterator MI,
1369 Register DestReg, int FrameIndex,
1370 const TargetRegisterClass *RC,
1371 const TargetRegisterInfo *TRI) const {
1372 MachineFunction *MF = MBB.getParent();
1373 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1374 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1375 const DebugLoc &DL = MBB.findDebugLoc(MI);
1376 unsigned SpillSize = TRI->getSpillSize(*RC);
1377
1378 MachinePointerInfo PtrInfo
1379 = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
1380
1381 MachineMemOperand *MMO = MF->getMachineMemOperand(
1382 PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
1383 FrameInfo.getObjectAlign(FrameIndex));
1384
1385 if (RI.isSGPRClass(RC)) {
1386 MFI->setHasSpilledSGPRs();
1387 assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
1388 assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
1389 DestReg != AMDGPU::EXEC && "exec should not be spilled");
1390
1391 // FIXME: Maybe this should not include a memoperand because it will be
1392 // lowered to non-memory instructions.
1393 const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
1394 if (DestReg.isVirtual() && SpillSize == 4) {
1395 MachineRegisterInfo &MRI = MF->getRegInfo();
1396 MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
1397 }
1398
1399 if (RI.spillSGPRToVGPR())
1400 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
1401 BuildMI(MBB, MI, DL, OpDesc, DestReg)
1402 .addFrameIndex(FrameIndex) // addr
1403 .addMemOperand(MMO)
1404 .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
1405 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
1406 return;
1407 }
1408
1409 unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
1410 : getVGPRSpillRestoreOpcode(SpillSize);
1411 auto MIB = BuildMI(MBB, MI, DL, get(Opcode), DestReg);
1412 if (RI.hasAGPRs(RC)) {
1413 MachineRegisterInfo &MRI = MF->getRegInfo();
1414 Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1415 MIB.addReg(Tmp, RegState::Define);
1416 }
1417 MIB.addFrameIndex(FrameIndex) // vaddr
1418 .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
1419 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
1420 .addImm(0) // offset
1421 .addMemOperand(MMO);
1422 }
1423
1424 /// \param FrameOffset Offset in bytes of the FrameIndex being spilled
1425 unsigned SIInstrInfo::calculateLDSSpillAddress(
1426 MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
1427 unsigned FrameOffset, unsigned Size) const {
1428 MachineFunction *MF = MBB.getParent();
1429 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1430 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
1431 const DebugLoc &DL = MBB.findDebugLoc(MI);
1432 unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
1433 unsigned WavefrontSize = ST.getWavefrontSize();
1434
1435 Register TIDReg = MFI->getTIDReg();
1436 if (!MFI->hasCalculatedTID()) {
1437 MachineBasicBlock &Entry = MBB.getParent()->front();
1438 MachineBasicBlock::iterator Insert = Entry.front();
1439 const DebugLoc &DL = Insert->getDebugLoc();
1440
1441 TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
1442 *MF);
1443 if (TIDReg == AMDGPU::NoRegister)
1444 return TIDReg;
1445
1446 if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) &&
1447 WorkGroupSize > WavefrontSize) {
1448 Register TIDIGXReg =
1449 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
1450 Register TIDIGYReg =
1451 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
1452 Register TIDIGZReg =
1453 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
1454 Register InputPtrReg =
1455 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
1456 for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
1457 if (!Entry.isLiveIn(Reg))
1458 Entry.addLiveIn(Reg);
1459 }
1460
1461 RS->enterBasicBlock(Entry);
1462 // FIXME: Can we scavenge an SReg_64 and access the subregs?
1463 Register STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
1464 Register STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
1465 BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
1466 .addReg(InputPtrReg)
1467 .addImm(SI::KernelInputOffsets::NGROUPS_Z);
1468 BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
1469 .addReg(InputPtrReg)
1470 .addImm(SI::KernelInputOffsets::NGROUPS_Y);
1471
1472 // NGROUPS.X * NGROUPS.Y
1473 BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
1474 .addReg(STmp1)
1475 .addReg(STmp0);
1476 // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
1477 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
1478 .addReg(STmp1)
1479 .addReg(TIDIGXReg);
1480       // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
1481 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
1482 .addReg(STmp0)
1483 .addReg(TIDIGYReg)
1484 .addReg(TIDReg);
1485       // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
1486 getAddNoCarry(Entry, Insert, DL, TIDReg)
1487 .addReg(TIDReg)
1488 .addReg(TIDIGZReg)
1489 .addImm(0); // clamp bit
1490 } else {
1491 // Get the wave id
1492 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
1493 TIDReg)
1494 .addImm(-1)
1495 .addImm(0);
1496
1497 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
1498 TIDReg)
1499 .addImm(-1)
1500 .addReg(TIDReg);
1501 }
1502
1503 BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
1504 TIDReg)
1505 .addImm(2)
1506 .addReg(TIDReg);
1507 MFI->setTIDReg(TIDReg);
1508 }
1509
1510 // Add FrameIndex to LDS offset
1511 unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
1512 getAddNoCarry(MBB, MI, DL, TmpReg)
1513 .addImm(LDSOffset)
1514 .addReg(TIDReg)
1515 .addImm(0); // clamp bit
1516
1517 return TmpReg;
1518 }
1519
1520 void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
1521 MachineBasicBlock::iterator MI,
1522 int Count) const {
1523 DebugLoc DL = MBB.findDebugLoc(MI);
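  // A single S_NOP covers at most 8 wait states; its immediate encodes the
  // wait count minus one (0..7).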
1524 while (Count > 0) {
1525 int Arg;
1526 if (Count >= 8)
1527 Arg = 7;
1528 else
1529 Arg = Count - 1;
1530 Count -= 8;
1531 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
1532 .addImm(Arg);
1533 }
1534 }
1535
1536 void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
1537 MachineBasicBlock::iterator MI) const {
1538 insertWaitStates(MBB, MI, 1);
1539 }
1540
1541 void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
1542 auto MF = MBB.getParent();
1543 SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1544
1545 assert(Info->isEntryFunction());
1546
1547 if (MBB.succ_empty()) {
1548 bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
1549 if (HasNoTerminator) {
1550 if (Info->returnsVoid()) {
1551 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
1552 } else {
1553 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
1554 }
1555 }
1556 }
1557 }
1558
1559 unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
1560 switch (MI.getOpcode()) {
1561 default: return 1; // FIXME: Do wait states equal cycles?
1562
1563 case AMDGPU::S_NOP:
1564 return MI.getOperand(0).getImm() + 1;
1565 }
1566 }
1567
1568 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1569 MachineBasicBlock &MBB = *MI.getParent();
1570 DebugLoc DL = MBB.findDebugLoc(MI);
1571 switch (MI.getOpcode()) {
1572 default: return TargetInstrInfo::expandPostRAPseudo(MI);
1573 case AMDGPU::S_MOV_B64_term:
1574 // This is only a terminator to get the correct spill code placement during
1575 // register allocation.
1576 MI.setDesc(get(AMDGPU::S_MOV_B64));
1577 break;
1578
1579 case AMDGPU::S_MOV_B32_term:
1580 // This is only a terminator to get the correct spill code placement during
1581 // register allocation.
1582 MI.setDesc(get(AMDGPU::S_MOV_B32));
1583 break;
1584
1585 case AMDGPU::S_XOR_B64_term:
1586 // This is only a terminator to get the correct spill code placement during
1587 // register allocation.
1588 MI.setDesc(get(AMDGPU::S_XOR_B64));
1589 break;
1590
1591 case AMDGPU::S_XOR_B32_term:
1592 // This is only a terminator to get the correct spill code placement during
1593 // register allocation.
1594 MI.setDesc(get(AMDGPU::S_XOR_B32));
1595 break;
1596
1597 case AMDGPU::S_OR_B32_term:
1598 // This is only a terminator to get the correct spill code placement during
1599 // register allocation.
1600 MI.setDesc(get(AMDGPU::S_OR_B32));
1601 break;
1602
1603 case AMDGPU::S_ANDN2_B64_term:
1604 // This is only a terminator to get the correct spill code placement during
1605 // register allocation.
1606 MI.setDesc(get(AMDGPU::S_ANDN2_B64));
1607 break;
1608
1609 case AMDGPU::S_ANDN2_B32_term:
1610 // This is only a terminator to get the correct spill code placement during
1611 // register allocation.
1612 MI.setDesc(get(AMDGPU::S_ANDN2_B32));
1613 break;
1614
1615 case AMDGPU::V_MOV_B64_PSEUDO: {
1616 Register Dst = MI.getOperand(0).getReg();
1617 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
1618 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
1619
1620 const MachineOperand &SrcOp = MI.getOperand(1);
1621 // FIXME: Will this work for 64-bit floating point immediates?
1622 assert(!SrcOp.isFPImm());
1623 if (SrcOp.isImm()) {
1624 APInt Imm(64, SrcOp.getImm());
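      // Illustrative example: an immediate of 0x1234567800000005 expands into
      // a v_mov_b32 of 0x5 into the low half and a v_mov_b32 of 0x12345678
      // into the high half of the destination register.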
1625 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
1626 .addImm(Imm.getLoBits(32).getZExtValue())
1627 .addReg(Dst, RegState::Implicit | RegState::Define);
1628 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
1629 .addImm(Imm.getHiBits(32).getZExtValue())
1630 .addReg(Dst, RegState::Implicit | RegState::Define);
1631 } else {
1632 assert(SrcOp.isReg());
1633 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
1634 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
1635 .addReg(Dst, RegState::Implicit | RegState::Define);
1636 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
1637 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
1638 .addReg(Dst, RegState::Implicit | RegState::Define);
1639 }
1640 MI.eraseFromParent();
1641 break;
1642 }
1643 case AMDGPU::V_MOV_B64_DPP_PSEUDO: {
1644 expandMovDPP64(MI);
1645 break;
1646 }
1647 case AMDGPU::V_SET_INACTIVE_B32: {
1648 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
1649 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
1650 BuildMI(MBB, MI, DL, get(NotOpc), Exec)
1651 .addReg(Exec);
1652 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
1653 .add(MI.getOperand(2));
1654 BuildMI(MBB, MI, DL, get(NotOpc), Exec)
1655 .addReg(Exec);
1656 MI.eraseFromParent();
1657 break;
1658 }
1659 case AMDGPU::V_SET_INACTIVE_B64: {
1660 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
1661 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
1662 BuildMI(MBB, MI, DL, get(NotOpc), Exec)
1663 .addReg(Exec);
1664 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO),
1665 MI.getOperand(0).getReg())
1666 .add(MI.getOperand(2));
1667 expandPostRAPseudo(*Copy);
1668 BuildMI(MBB, MI, DL, get(NotOpc), Exec)
1669 .addReg(Exec);
1670 MI.eraseFromParent();
1671 break;
1672 }
1673 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V1:
1674 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V2:
1675 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V3:
1676 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V4:
1677 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V5:
1678 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V8:
1679 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V16:
1680 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V32:
1681 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V1:
1682 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V2:
1683 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V3:
1684 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V4:
1685 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V5:
1686 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V8:
1687 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V16:
1688 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V32:
1689 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V1:
1690 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V2:
1691 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V4:
1692 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V8:
1693 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V16: {
1694 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2);
1695
1696 unsigned Opc;
1697 if (RI.hasVGPRs(EltRC)) {
1698 Opc = ST.useVGPRIndexMode() ?
1699 AMDGPU::V_MOV_B32_indirect : AMDGPU::V_MOVRELD_B32_e32;
1700 } else {
1701 Opc = RI.getRegSizeInBits(*EltRC) == 64 ?
1702 AMDGPU::S_MOVRELD_B64 : AMDGPU::S_MOVRELD_B32;
1703 }
1704
1705 const MCInstrDesc &OpDesc = get(Opc);
1706 Register VecReg = MI.getOperand(0).getReg();
1707 bool IsUndef = MI.getOperand(1).isUndef();
1708 unsigned SubReg = MI.getOperand(3).getImm();
1709 assert(VecReg == MI.getOperand(1).getReg());
1710
1711 MachineInstrBuilder MIB =
1712 BuildMI(MBB, MI, DL, OpDesc)
1713 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
1714 .add(MI.getOperand(2))
1715 .addReg(VecReg, RegState::ImplicitDefine)
1716 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));
1717
1718 const int ImpDefIdx =
1719 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses();
1720 const int ImpUseIdx = ImpDefIdx + 1;
1721 MIB->tieOperands(ImpDefIdx, ImpUseIdx);
1722 MI.eraseFromParent();
1723 break;
1724 }
1725 case AMDGPU::SI_PC_ADD_REL_OFFSET: {
1726 MachineFunction &MF = *MBB.getParent();
1727 Register Reg = MI.getOperand(0).getReg();
1728 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
1729 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
1730
1731 // Create a bundle so these instructions won't be re-ordered by the
1732 // post-RA scheduler.
1733 MIBundleBuilder Bundler(MBB, MI);
1734 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
1735
1736 // Add 32-bit offset from this instruction to the start of the
1737 // constant data.
1738 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
1739 .addReg(RegLo)
1740 .add(MI.getOperand(1)));
1741
1742 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
1743 .addReg(RegHi);
1744 MIB.add(MI.getOperand(2));
1745
1746 Bundler.append(MIB);
1747 finalizeBundle(MBB, Bundler.begin());
1748
1749 MI.eraseFromParent();
1750 break;
1751 }
1752 case AMDGPU::ENTER_WWM: {
1753 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
1754 // WWM is entered.
1755 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
1756 : AMDGPU::S_OR_SAVEEXEC_B64));
1757 break;
1758 }
1759 case AMDGPU::EXIT_WWM: {
1760 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
1761 // WWM is exited.
1762 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64));
1763 break;
1764 }
1765 }
1766 return true;
1767 }
1768
1769 std::pair<MachineInstr*, MachineInstr*>
1770 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
1771 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
1772
1773 MachineBasicBlock &MBB = *MI.getParent();
1774 DebugLoc DL = MBB.findDebugLoc(MI);
1775 MachineFunction *MF = MBB.getParent();
1776 MachineRegisterInfo &MRI = MF->getRegInfo();
1777 Register Dst = MI.getOperand(0).getReg();
1778 unsigned Part = 0;
1779 MachineInstr *Split[2];
1780
1781
1782 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) {
1783 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp));
1784 if (Dst.isPhysical()) {
1785 MovDPP.addDef(RI.getSubReg(Dst, Sub));
1786 } else {
1787 assert(MRI.isSSA());
1788 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1789 MovDPP.addDef(Tmp);
1790 }
1791
1792 for (unsigned I = 1; I <= 2; ++I) { // old and src operands.
1793 const MachineOperand &SrcOp = MI.getOperand(I);
1794 assert(!SrcOp.isFPImm());
1795 if (SrcOp.isImm()) {
1796 APInt Imm(64, SrcOp.getImm());
1797 Imm.ashrInPlace(Part * 32);
1798 MovDPP.addImm(Imm.getLoBits(32).getZExtValue());
1799 } else {
1800 assert(SrcOp.isReg());
1801 Register Src = SrcOp.getReg();
1802 if (Src.isPhysical())
1803 MovDPP.addReg(RI.getSubReg(Src, Sub));
1804 else
1805 MovDPP.addReg(Src, SrcOp.isUndef() ? RegState::Undef : 0, Sub);
1806 }
1807 }
1808
1809 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I)
1810 MovDPP.addImm(MI.getOperand(I).getImm());
1811
1812 Split[Part] = MovDPP;
1813 ++Part;
1814 }
1815
1816 if (Dst.isVirtual())
1817 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst)
1818 .addReg(Split[0]->getOperand(0).getReg())
1819 .addImm(AMDGPU::sub0)
1820 .addReg(Split[1]->getOperand(0).getReg())
1821 .addImm(AMDGPU::sub1);
1822
1823 MI.eraseFromParent();
1824 return std::make_pair(Split[0], Split[1]);
1825 }
1826
1827 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
1828 MachineOperand &Src0,
1829 unsigned Src0OpName,
1830 MachineOperand &Src1,
1831 unsigned Src1OpName) const {
1832 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
1833 if (!Src0Mods)
1834 return false;
1835
1836 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
1837 assert(Src1Mods &&
1838 "All commutable instructions have both src0 and src1 modifiers");
1839
1840 int Src0ModsVal = Src0Mods->getImm();
1841 int Src1ModsVal = Src1Mods->getImm();
1842
1843 Src1Mods->setImm(Src0ModsVal);
1844 Src0Mods->setImm(Src1ModsVal);
1845 return true;
1846 }
1847
1848 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
1849 MachineOperand &RegOp,
1850 MachineOperand &NonRegOp) {
1851 Register Reg = RegOp.getReg();
1852 unsigned SubReg = RegOp.getSubReg();
1853 bool IsKill = RegOp.isKill();
1854 bool IsDead = RegOp.isDead();
1855 bool IsUndef = RegOp.isUndef();
1856 bool IsDebug = RegOp.isDebug();
1857
1858 if (NonRegOp.isImm())
1859 RegOp.ChangeToImmediate(NonRegOp.getImm());
1860 else if (NonRegOp.isFI())
1861 RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
1862 else if (NonRegOp.isGlobal()) {
1863 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(),
1864 NonRegOp.getTargetFlags());
1865 } else
1866 return nullptr;
1867
1868 // Make sure we don't reinterpret a subreg index in the target flags.
1869 RegOp.setTargetFlags(NonRegOp.getTargetFlags());
1870
1871 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
1872 NonRegOp.setSubReg(SubReg);
1873
1874 return &MI;
1875 }
1876
1877 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
1878 unsigned Src0Idx,
1879 unsigned Src1Idx) const {
1880 assert(!NewMI && "this should never be used");
1881
1882 unsigned Opc = MI.getOpcode();
1883 int CommutedOpcode = commuteOpcode(Opc);
1884 if (CommutedOpcode == -1)
1885 return nullptr;
1886
1887 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
1888 static_cast<int>(Src0Idx) &&
1889 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
1890 static_cast<int>(Src1Idx) &&
1891 "inconsistency with findCommutedOpIndices");
1892
1893 MachineOperand &Src0 = MI.getOperand(Src0Idx);
1894 MachineOperand &Src1 = MI.getOperand(Src1Idx);
1895
1896 MachineInstr *CommutedMI = nullptr;
1897 if (Src0.isReg() && Src1.isReg()) {
1898 if (isOperandLegal(MI, Src1Idx, &Src0)) {
1899 // Be sure to copy the source modifiers to the right place.
1900 CommutedMI
1901 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
1902 }
1903
1904 } else if (Src0.isReg() && !Src1.isReg()) {
1905 // src0 should always be able to support any operand type, so no need to
1906 // check operand legality.
1907 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
1908 } else if (!Src0.isReg() && Src1.isReg()) {
1909 if (isOperandLegal(MI, Src1Idx, &Src0))
1910 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
1911 } else {
1912 // FIXME: Found two non registers to commute. This does happen.
1913 return nullptr;
1914 }
1915
1916 if (CommutedMI) {
1917 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
1918 Src1, AMDGPU::OpName::src1_modifiers);
1919
1920 CommutedMI->setDesc(get(CommutedOpcode));
1921 }
1922
1923 return CommutedMI;
1924 }
1925
1926 // This needs to be implemented because the source modifiers may be inserted
1927 // between the true commutable operands, and the base
1928 // TargetInstrInfo::commuteInstruction uses it.
1929 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
1930 unsigned &SrcOpIdx0,
1931 unsigned &SrcOpIdx1) const {
1932 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1);
1933 }
1934
1935 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0,
1936 unsigned &SrcOpIdx1) const {
1937 if (!Desc.isCommutable())
1938 return false;
1939
1940 unsigned Opc = Desc.getOpcode();
1941 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1942 if (Src0Idx == -1)
1943 return false;
1944
1945 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1946 if (Src1Idx == -1)
1947 return false;
1948
1949 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
1950 }
1951
1952 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
1953 int64_t BrOffset) const {
1954 // BranchRelaxation should never have to check s_setpc_b64 because its dest
1955 // block is unanalyzable.
1956 assert(BranchOp != AMDGPU::S_SETPC_B64);
1957
1958 // Convert to dwords.
1959 BrOffset /= 4;
1960
1961 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
1962 // from the next instruction.
1963 BrOffset -= 1;
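  // Illustrative example: with the default 16-bit offset field, a forward
  // branch of 131072 bytes becomes 131072 / 4 - 1 = 32767 dwords and still
  // fits, while 131076 bytes (32768 after the same adjustment) does not.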
1964
1965 return isIntN(BranchOffsetBits, BrOffset);
1966 }
1967
1968 MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
1969 const MachineInstr &MI) const {
1970 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
1971 // This would be a difficult analysis to perform, but can always be legal so
1972 // there's no need to analyze it.
1973 return nullptr;
1974 }
1975
1976 return MI.getOperand(0).getMBB();
1977 }
1978
1979 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
1980 MachineBasicBlock &DestBB,
1981 const DebugLoc &DL,
1982 int64_t BrOffset,
1983 RegScavenger *RS) const {
1984 assert(RS && "RegScavenger required for long branching");
1985 assert(MBB.empty() &&
1986 "new block should be inserted for expanding unconditional branch");
1987 assert(MBB.pred_size() == 1);
1988
1989 MachineFunction *MF = MBB.getParent();
1990 MachineRegisterInfo &MRI = MF->getRegInfo();
1991
1992 // FIXME: Virtual register workaround for RegScavenger not working with empty
1993 // blocks.
1994 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
1995
1996 auto I = MBB.end();
1997
1998 // We need to compute the offset relative to the instruction immediately after
1999 // s_getpc_b64. Insert pc arithmetic code before last terminator.
2000 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);
2001
2002 // TODO: Handle > 32-bit block address.
2003 if (BrOffset >= 0) {
2004 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
2005 .addReg(PCReg, RegState::Define, AMDGPU::sub0)
2006 .addReg(PCReg, 0, AMDGPU::sub0)
2007 .addMBB(&DestBB, MO_LONG_BRANCH_FORWARD);
2008 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
2009 .addReg(PCReg, RegState::Define, AMDGPU::sub1)
2010 .addReg(PCReg, 0, AMDGPU::sub1)
2011 .addImm(0);
2012 } else {
2013 // Backwards branch.
2014 BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
2015 .addReg(PCReg, RegState::Define, AMDGPU::sub0)
2016 .addReg(PCReg, 0, AMDGPU::sub0)
2017 .addMBB(&DestBB, MO_LONG_BRANCH_BACKWARD);
2018 BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
2019 .addReg(PCReg, RegState::Define, AMDGPU::sub1)
2020 .addReg(PCReg, 0, AMDGPU::sub1)
2021 .addImm(0);
2022 }
2023
2024 // Insert the indirect branch after the other terminator.
2025 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
2026 .addReg(PCReg);
2027
2028 // FIXME: If spilling is necessary, this will fail because this scavenger has
2029 // no emergency stack slots. It is non-trivial to spill in this situation,
2030 // because the restore code needs to be specially placed after the
2031 // jump. BranchRelaxation then needs to be made aware of the newly inserted
2032 // block.
2033 //
2034 // If a spill is needed for the pc register pair, we need to insert a spill
2035 // restore block right before the destination block, and insert a short branch
2036 // into the old destination block's fallthrough predecessor.
2037 // e.g.:
2038 //
2039 // s_cbranch_scc0 skip_long_branch:
2040 //
2041 // long_branch_bb:
2042 // spill s[8:9]
2043 // s_getpc_b64 s[8:9]
2044 // s_add_u32 s8, s8, restore_bb
2045 // s_addc_u32 s9, s9, 0
2046 // s_setpc_b64 s[8:9]
2047 //
2048 // skip_long_branch:
2049 // foo;
2050 //
2051 // .....
2052 //
2053 // dest_bb_fallthrough_predecessor:
2054 // bar;
2055 // s_branch dest_bb
2056 //
2057 // restore_bb:
2058 // restore s[8:9]
2059 // fallthrough dest_bb
2060   //
2061 // dest_bb:
2062 // buzz;
2063
2064 RS->enterBasicBlockEnd(MBB);
2065 unsigned Scav = RS->scavengeRegisterBackwards(
2066 AMDGPU::SReg_64RegClass,
2067 MachineBasicBlock::iterator(GetPC), false, 0);
2068 MRI.replaceRegWith(PCReg, Scav);
2069 MRI.clearVirtRegs();
2070 RS->setRegUsed(Scav);
2071
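  // The size presumably breaks down as s_getpc_b64 (4 bytes), s_add_u32 with
  // a 32-bit literal (8 bytes), s_addc_u32 (4 bytes) and s_setpc_b64 (4
  // bytes).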
2072 return 4 + 8 + 4 + 4;
2073 }
2074
2075 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
2076 switch (Cond) {
2077 case SIInstrInfo::SCC_TRUE:
2078 return AMDGPU::S_CBRANCH_SCC1;
2079 case SIInstrInfo::SCC_FALSE:
2080 return AMDGPU::S_CBRANCH_SCC0;
2081 case SIInstrInfo::VCCNZ:
2082 return AMDGPU::S_CBRANCH_VCCNZ;
2083 case SIInstrInfo::VCCZ:
2084 return AMDGPU::S_CBRANCH_VCCZ;
2085 case SIInstrInfo::EXECNZ:
2086 return AMDGPU::S_CBRANCH_EXECNZ;
2087 case SIInstrInfo::EXECZ:
2088 return AMDGPU::S_CBRANCH_EXECZ;
2089 default:
2090 llvm_unreachable("invalid branch predicate");
2091 }
2092 }
2093
2094 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
2095 switch (Opcode) {
2096 case AMDGPU::S_CBRANCH_SCC0:
2097 return SCC_FALSE;
2098 case AMDGPU::S_CBRANCH_SCC1:
2099 return SCC_TRUE;
2100 case AMDGPU::S_CBRANCH_VCCNZ:
2101 return VCCNZ;
2102 case AMDGPU::S_CBRANCH_VCCZ:
2103 return VCCZ;
2104 case AMDGPU::S_CBRANCH_EXECNZ:
2105 return EXECNZ;
2106 case AMDGPU::S_CBRANCH_EXECZ:
2107 return EXECZ;
2108 default:
2109 return INVALID_BR;
2110 }
2111 }
2112
2113 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
2114 MachineBasicBlock::iterator I,
2115 MachineBasicBlock *&TBB,
2116 MachineBasicBlock *&FBB,
2117 SmallVectorImpl<MachineOperand> &Cond,
2118 bool AllowModify) const {
2119 if (I->getOpcode() == AMDGPU::S_BRANCH) {
2120 // Unconditional Branch
2121 TBB = I->getOperand(0).getMBB();
2122 return false;
2123 }
2124
2125 MachineBasicBlock *CondBB = nullptr;
2126
2127 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
2128 CondBB = I->getOperand(1).getMBB();
2129 Cond.push_back(I->getOperand(0));
2130 } else {
2131 BranchPredicate Pred = getBranchPredicate(I->getOpcode());
2132 if (Pred == INVALID_BR)
2133 return true;
2134
2135 CondBB = I->getOperand(0).getMBB();
2136 Cond.push_back(MachineOperand::CreateImm(Pred));
2137 Cond.push_back(I->getOperand(1)); // Save the branch register.
2138 }
2139 ++I;
2140
2141 if (I == MBB.end()) {
2142 // Conditional branch followed by fall-through.
2143 TBB = CondBB;
2144 return false;
2145 }
2146
2147 if (I->getOpcode() == AMDGPU::S_BRANCH) {
2148 TBB = CondBB;
2149 FBB = I->getOperand(0).getMBB();
2150 return false;
2151 }
2152
2153 return true;
2154 }
2155
2156 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
2157 MachineBasicBlock *&FBB,
2158 SmallVectorImpl<MachineOperand> &Cond,
2159 bool AllowModify) const {
2160 MachineBasicBlock::iterator I = MBB.getFirstTerminator();
2161 auto E = MBB.end();
2162 if (I == E)
2163 return false;
2164
2165   // Skip over the instructions that are artificial terminators for special
2166 // exec management.
2167 while (I != E && !I->isBranch() && !I->isReturn() &&
2168 I->getOpcode() != AMDGPU::SI_MASK_BRANCH) {
2169 switch (I->getOpcode()) {
2170 case AMDGPU::SI_MASK_BRANCH:
2171 case AMDGPU::S_MOV_B64_term:
2172 case AMDGPU::S_XOR_B64_term:
2173 case AMDGPU::S_ANDN2_B64_term:
2174 case AMDGPU::S_MOV_B32_term:
2175 case AMDGPU::S_XOR_B32_term:
2176 case AMDGPU::S_OR_B32_term:
2177 case AMDGPU::S_ANDN2_B32_term:
2178 break;
2179 case AMDGPU::SI_IF:
2180 case AMDGPU::SI_ELSE:
2181 case AMDGPU::SI_KILL_I1_TERMINATOR:
2182 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
2183 // FIXME: It's messy that these need to be considered here at all.
2184 return true;
2185 default:
2186 llvm_unreachable("unexpected non-branch terminator inst");
2187 }
2188
2189 ++I;
2190 }
2191
2192 if (I == E)
2193 return false;
2194
2195 if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
2196 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
2197
2198 ++I;
2199
2200 // TODO: Should be able to treat as fallthrough?
2201 if (I == MBB.end())
2202 return true;
2203
2204 if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
2205 return true;
2206
2207 MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();
2208
2209 // Specifically handle the case where the conditional branch is to the same
2210 // destination as the mask branch. e.g.
2211 //
2212 // si_mask_branch BB8
2213 // s_cbranch_execz BB8
2214 // s_cbranch BB9
2215 //
2216 // This is required to understand divergent loops which may need the branches
2217 // to be relaxed.
2218 if (TBB != MaskBrDest || Cond.empty())
2219 return true;
2220
2221 auto Pred = Cond[0].getImm();
2222 return (Pred != EXECZ && Pred != EXECNZ);
2223 }
2224
2225 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
2226 int *BytesRemoved) const {
2227 MachineBasicBlock::iterator I = MBB.getFirstTerminator();
2228
2229 unsigned Count = 0;
2230 unsigned RemovedSize = 0;
2231 while (I != MBB.end()) {
2232 MachineBasicBlock::iterator Next = std::next(I);
2233 if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
2234 I = Next;
2235 continue;
2236 }
2237
2238 RemovedSize += getInstSizeInBytes(*I);
2239 I->eraseFromParent();
2240 ++Count;
2241 I = Next;
2242 }
2243
2244 if (BytesRemoved)
2245 *BytesRemoved = RemovedSize;
2246
2247 return Count;
2248 }
2249
2250 // Copy the flags onto the implicit condition register operand.
2251 static void preserveCondRegFlags(MachineOperand &CondReg,
2252 const MachineOperand &OrigCond) {
2253 CondReg.setIsUndef(OrigCond.isUndef());
2254 CondReg.setIsKill(OrigCond.isKill());
2255 }
2256
2257 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
2258 MachineBasicBlock *TBB,
2259 MachineBasicBlock *FBB,
2260 ArrayRef<MachineOperand> Cond,
2261 const DebugLoc &DL,
2262 int *BytesAdded) const {
2263 if (!FBB && Cond.empty()) {
2264 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2265 .addMBB(TBB);
2266 if (BytesAdded)
2267 *BytesAdded = 4;
2268 return 1;
2269 }
2270
2271   if (Cond.size() == 1 && Cond[0].isReg()) {
2272 BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
2273 .add(Cond[0])
2274 .addMBB(TBB);
2275 return 1;
2276 }
2277
2278 assert(TBB && Cond[0].isImm());
2279
2280 unsigned Opcode
2281 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
2282
2283 if (!FBB) {
2285 MachineInstr *CondBr =
2286 BuildMI(&MBB, DL, get(Opcode))
2287 .addMBB(TBB);
2288
2289 // Copy the flags onto the implicit condition register operand.
2290 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
2291 fixImplicitOperands(*CondBr);
2292
2293 if (BytesAdded)
2294 *BytesAdded = 4;
2295 return 1;
2296 }
2297
2298 assert(TBB && FBB);
2299
2300 MachineInstr *CondBr =
2301 BuildMI(&MBB, DL, get(Opcode))
2302 .addMBB(TBB);
2303 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2304 .addMBB(FBB);
2305
2306 MachineOperand &CondReg = CondBr->getOperand(1);
2307 CondReg.setIsUndef(Cond[1].isUndef());
2308 CondReg.setIsKill(Cond[1].isKill());
2309
2310 if (BytesAdded)
2311 *BytesAdded = 8;
2312
2313 return 2;
2314 }
2315
2316 bool SIInstrInfo::reverseBranchCondition(
2317 SmallVectorImpl<MachineOperand> &Cond) const {
2318 if (Cond.size() != 2) {
2319 return true;
2320 }
2321
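  // Opposite BranchPredicate values are assumed to be encoded as negations of
  // each other (e.g. SCC_TRUE vs. SCC_FALSE), so negating the immediate flips
  // the condition.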
2322 if (Cond[0].isImm()) {
2323 Cond[0].setImm(-Cond[0].getImm());
2324 return false;
2325 }
2326
2327 return true;
2328 }
2329
2330 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
2331 ArrayRef<MachineOperand> Cond,
2332 Register DstReg, Register TrueReg,
2333 Register FalseReg, int &CondCycles,
2334 int &TrueCycles, int &FalseCycles) const {
2335 switch (Cond[0].getImm()) {
2336 case VCCNZ:
2337 case VCCZ: {
2338 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2339 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2340 assert(MRI.getRegClass(FalseReg) == RC);
2341
2342 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2343 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2344
2345 // Limit to equal cost for branch vs. N v_cndmask_b32s.
2346 return RI.hasVGPRs(RC) && NumInsts <= 6;
2347 }
2348 case SCC_TRUE:
2349 case SCC_FALSE: {
2350 // FIXME: We could insert for VGPRs if we could replace the original compare
2351 // with a vector one.
2352 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2353 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2354 assert(MRI.getRegClass(FalseReg) == RC);
2355
2356 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2357
2358     // Sizes that are a multiple of 64 bits (8 bytes) can use s_cselect_b64.
2359 if (NumInsts % 2 == 0)
2360 NumInsts /= 2;
2361
2362 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2363 return RI.isSGPRClass(RC);
2364 }
2365 default:
2366 return false;
2367 }
2368 }
2369
2370 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
2371 MachineBasicBlock::iterator I, const DebugLoc &DL,
2372 Register DstReg, ArrayRef<MachineOperand> Cond,
2373 Register TrueReg, Register FalseReg) const {
2374 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
2375 if (Pred == VCCZ || Pred == SCC_FALSE) {
2376 Pred = static_cast<BranchPredicate>(-Pred);
2377 std::swap(TrueReg, FalseReg);
2378 }
2379
2380 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2381 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
2382 unsigned DstSize = RI.getRegSizeInBits(*DstRC);
2383
2384 if (DstSize == 32) {
2385 MachineInstr *Select;
2386 if (Pred == SCC_TRUE) {
2387 Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg)
2388 .addReg(TrueReg)
2389 .addReg(FalseReg);
2390 } else {
2391 // Instruction's operands are backwards from what is expected.
2392 Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg)
2393 .addReg(FalseReg)
2394 .addReg(TrueReg);
2395 }
2396
2397 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2398 return;
2399 }
2400
2401 if (DstSize == 64 && Pred == SCC_TRUE) {
2402 MachineInstr *Select =
2403 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
2404 .addReg(TrueReg)
2405 .addReg(FalseReg);
2406
2407 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2408 return;
2409 }
2410
2411 static const int16_t Sub0_15[] = {
2412 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
2413 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
2414 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
2415 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
2416 };
2417
2418 static const int16_t Sub0_15_64[] = {
2419 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
2420 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
2421 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
2422 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
2423 };
2424
2425 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
2426 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
2427 const int16_t *SubIndices = Sub0_15;
2428 int NElts = DstSize / 32;
2429
2430 // 64-bit select is only available for SALU.
2431 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit.
2432 if (Pred == SCC_TRUE) {
2433 if (NElts % 2) {
2434 SelOp = AMDGPU::S_CSELECT_B32;
2435 EltRC = &AMDGPU::SGPR_32RegClass;
2436 } else {
2437 SelOp = AMDGPU::S_CSELECT_B64;
2438 EltRC = &AMDGPU::SGPR_64RegClass;
2439 SubIndices = Sub0_15_64;
2440 NElts /= 2;
2441 }
2442 }
2443
2444 MachineInstrBuilder MIB = BuildMI(
2445 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);
2446
2447 I = MIB->getIterator();
2448
2449 SmallVector<Register, 8> Regs;
2450 for (int Idx = 0; Idx != NElts; ++Idx) {
2451 Register DstElt = MRI.createVirtualRegister(EltRC);
2452 Regs.push_back(DstElt);
2453
2454 unsigned SubIdx = SubIndices[Idx];
2455
2456 MachineInstr *Select;
2457 if (SelOp == AMDGPU::V_CNDMASK_B32_e32) {
2458 Select =
2459 BuildMI(MBB, I, DL, get(SelOp), DstElt)
2460 .addReg(FalseReg, 0, SubIdx)
2461 .addReg(TrueReg, 0, SubIdx);
2462 } else {
2463 Select =
2464 BuildMI(MBB, I, DL, get(SelOp), DstElt)
2465 .addReg(TrueReg, 0, SubIdx)
2466 .addReg(FalseReg, 0, SubIdx);
2467 }
2468
2469 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2470 fixImplicitOperands(*Select);
2471
2472 MIB.addReg(DstElt)
2473 .addImm(SubIdx);
2474 }
2475 }
2476
2477 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const {
2478 switch (MI.getOpcode()) {
2479 case AMDGPU::V_MOV_B32_e32:
2480 case AMDGPU::V_MOV_B32_e64:
2481 case AMDGPU::V_MOV_B64_PSEUDO: {
2482 // If there are additional implicit register operands, this may be used for
2483 // register indexing so the source register operand isn't simply copied.
2484 unsigned NumOps = MI.getDesc().getNumOperands() +
2485 MI.getDesc().getNumImplicitUses();
2486
2487 return MI.getNumOperands() == NumOps;
2488 }
2489 case AMDGPU::S_MOV_B32:
2490 case AMDGPU::S_MOV_B64:
2491 case AMDGPU::COPY:
2492 case AMDGPU::V_ACCVGPR_WRITE_B32:
2493 case AMDGPU::V_ACCVGPR_READ_B32:
2494 return true;
2495 default:
2496 return false;
2497 }
2498 }
2499
2500 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind(
2501 unsigned Kind) const {
2502 switch(Kind) {
2503 case PseudoSourceValue::Stack:
2504 case PseudoSourceValue::FixedStack:
2505 return AMDGPUAS::PRIVATE_ADDRESS;
2506 case PseudoSourceValue::ConstantPool:
2507 case PseudoSourceValue::GOT:
2508 case PseudoSourceValue::JumpTable:
2509 case PseudoSourceValue::GlobalValueCallEntry:
2510 case PseudoSourceValue::ExternalSymbolCallEntry:
2511 case PseudoSourceValue::TargetCustom:
2512 return AMDGPUAS::CONSTANT_ADDRESS;
2513 }
2514 return AMDGPUAS::FLAT_ADDRESS;
2515 }
2516
2517 static void removeModOperands(MachineInstr &MI) {
2518 unsigned Opc = MI.getOpcode();
2519 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2520 AMDGPU::OpName::src0_modifiers);
2521 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2522 AMDGPU::OpName::src1_modifiers);
2523 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2524 AMDGPU::OpName::src2_modifiers);
2525
2526 MI.RemoveOperand(Src2ModIdx);
2527 MI.RemoveOperand(Src1ModIdx);
2528 MI.RemoveOperand(Src0ModIdx);
2529 }
2530
2531 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
2532 Register Reg, MachineRegisterInfo *MRI) const {
2533 if (!MRI->hasOneNonDBGUse(Reg))
2534 return false;
2535
2536 switch (DefMI.getOpcode()) {
2537 default:
2538 return false;
2539 case AMDGPU::S_MOV_B64:
2540     // TODO: We could fold 64-bit immediates, but this gets complicated
2541 // when there are sub-registers.
2542 return false;
2543
2544 case AMDGPU::V_MOV_B32_e32:
2545 case AMDGPU::S_MOV_B32:
2546 case AMDGPU::V_ACCVGPR_WRITE_B32:
2547 break;
2548 }
2549
2550 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
2551 assert(ImmOp);
2552 // FIXME: We could handle FrameIndex values here.
2553 if (!ImmOp->isImm())
2554 return false;
2555
2556 unsigned Opc = UseMI.getOpcode();
2557 if (Opc == AMDGPU::COPY) {
2558 Register DstReg = UseMI.getOperand(0).getReg();
2559 bool Is16Bit = getOpSize(UseMI, 0) == 2;
2560 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg);
2561 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2562 APInt Imm(32, ImmOp->getImm());
2563
2564 if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16)
2565 Imm = Imm.ashr(16);
2566
2567 if (RI.isAGPR(*MRI, DstReg)) {
2568 if (!isInlineConstant(Imm))
2569 return false;
2570 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32;
2571 }
2572
2573 if (Is16Bit) {
2574 if (isVGPRCopy)
2575 return false; // Do not clobber vgpr_hi16
2576
2577 if (DstReg.isVirtual() &&
2578 UseMI.getOperand(0).getSubReg() != AMDGPU::lo16)
2579 return false;
2580
2581 UseMI.getOperand(0).setSubReg(0);
2582 if (DstReg.isPhysical()) {
2583 DstReg = RI.get32BitRegister(DstReg);
2584 UseMI.getOperand(0).setReg(DstReg);
2585 }
2586 assert(UseMI.getOperand(1).getReg().isVirtual());
2587 }
2588
2589 UseMI.setDesc(get(NewOpc));
2590 UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue());
2591 UseMI.getOperand(1).setTargetFlags(0);
2592 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
2593 return true;
2594 }
2595
2596 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
2597 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64 ||
2598 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2599 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64) {
2600 // Don't fold if we are using source or output modifiers. The new VOP2
2601 // instructions don't have them.
2602 if (hasAnyModifiersSet(UseMI))
2603 return false;
2604
2605 // If this is a free constant, there's no reason to do this.
2606 // TODO: We could fold this here instead of letting SIFoldOperands do it
2607 // later.
2608 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
2609
2610 // Any src operand can be used for the legality check.
2611 if (isInlineConstant(UseMI, *Src0, *ImmOp))
2612 return false;
2613
2614 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
2615 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64;
2616 bool IsFMA = Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2617 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64;
2618 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
2619 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
2620
2621 // Multiplied part is the constant: Use v_madmk_{f16, f32}.
2622 // We should only expect these to be on src0 due to canonicalizations.
2623 if (Src0->isReg() && Src0->getReg() == Reg) {
2624 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
2625 return false;
2626
2627 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
2628 return false;
2629
2630 unsigned NewOpc =
2631 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16)
2632 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16);
2633 if (pseudoToMCOpcode(NewOpc) == -1)
2634 return false;
2635
2636 // We need to swap operands 0 and 1 since madmk constant is at operand 1.
2637
2638 const int64_t Imm = ImmOp->getImm();
2639
2640 // FIXME: This would be a lot easier if we could return a new instruction
2641 // instead of having to modify in place.
2642
2643 // Remove these first since they are at the end.
2644 UseMI.RemoveOperand(
2645 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
2646 UseMI.RemoveOperand(
2647 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
2648
2649 Register Src1Reg = Src1->getReg();
2650 unsigned Src1SubReg = Src1->getSubReg();
2651 Src0->setReg(Src1Reg);
2652 Src0->setSubReg(Src1SubReg);
2653 Src0->setIsKill(Src1->isKill());
2654
2655 if (Opc == AMDGPU::V_MAC_F32_e64 ||
2656 Opc == AMDGPU::V_MAC_F16_e64 ||
2657 Opc == AMDGPU::V_FMAC_F32_e64 ||
2658 Opc == AMDGPU::V_FMAC_F16_e64)
2659 UseMI.untieRegOperand(
2660 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
2661
2662 Src1->ChangeToImmediate(Imm);
2663
2664 removeModOperands(UseMI);
2665 UseMI.setDesc(get(NewOpc));
2666
2667 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
2668 if (DeleteDef)
2669 DefMI.eraseFromParent();
2670
2671 return true;
2672 }
2673
2674 // Added part is the constant: Use v_madak_{f16, f32}.
2675 if (Src2->isReg() && Src2->getReg() == Reg) {
2676 // Not allowed to use constant bus for another operand.
2677 // We can however allow an inline immediate as src0.
2678 bool Src0Inlined = false;
2679 if (Src0->isReg()) {
2680         // Try to inline the constant if possible.
2681         // If the def is a move of an immediate and this is its only use,
2682         // we are saving a VGPR here.
2683 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg());
2684 if (Def && Def->isMoveImmediate() &&
2685 isInlineConstant(Def->getOperand(1)) &&
2686 MRI->hasOneUse(Src0->getReg())) {
2687 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2688 Src0Inlined = true;
2689 } else if ((Register::isPhysicalRegister(Src0->getReg()) &&
2690 (ST.getConstantBusLimit(Opc) <= 1 &&
2691 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
2692 (Register::isVirtualRegister(Src0->getReg()) &&
2693 (ST.getConstantBusLimit(Opc) <= 1 &&
2694 RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
2695 return false;
2696 // VGPR is okay as Src0 - fallthrough
2697 }
2698
2699       if (Src1->isReg() && !Src0Inlined) {
2700 // We have one slot for inlinable constant so far - try to fill it
2701 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg());
2702 if (Def && Def->isMoveImmediate() &&
2703 isInlineConstant(Def->getOperand(1)) &&
2704 MRI->hasOneUse(Src1->getReg()) &&
2705 commuteInstruction(UseMI)) {
2706 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2707 } else if ((Register::isPhysicalRegister(Src1->getReg()) &&
2708 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
2709 (Register::isVirtualRegister(Src1->getReg()) &&
2710 RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
2711 return false;
2712 // VGPR is okay as Src1 - fallthrough
2713 }
2714
2715 unsigned NewOpc =
2716 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16)
2717 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16);
2718 if (pseudoToMCOpcode(NewOpc) == -1)
2719 return false;
2720
2721 const int64_t Imm = ImmOp->getImm();
2722
2723 // FIXME: This would be a lot easier if we could return a new instruction
2724 // instead of having to modify in place.
2725
2726 // Remove these first since they are at the end.
2727 UseMI.RemoveOperand(
2728 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
2729 UseMI.RemoveOperand(
2730 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
2731
2732 if (Opc == AMDGPU::V_MAC_F32_e64 ||
2733 Opc == AMDGPU::V_MAC_F16_e64 ||
2734 Opc == AMDGPU::V_FMAC_F32_e64 ||
2735 Opc == AMDGPU::V_FMAC_F16_e64)
2736 UseMI.untieRegOperand(
2737 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
2738
2739 // ChangingToImmediate adds Src2 back to the instruction.
2740 Src2->ChangeToImmediate(Imm);
2741
2742 // These come before src2.
2743 removeModOperands(UseMI);
2744 UseMI.setDesc(get(NewOpc));
2745       // It might happen that UseMI was commuted and we now have an SGPR as
2746       // SRC1. If so, the inline constant plus the SGPR would violate the
2747       // constant bus restriction, so re-legalize the operands.
2748 legalizeOperands(UseMI);
2749
2750 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
2751 if (DeleteDef)
2752 DefMI.eraseFromParent();
2753
2754 return true;
2755 }
2756 }
2757
2758 return false;
2759 }
2760
2761 static bool
2762 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1,
2763 ArrayRef<const MachineOperand *> BaseOps2) {
2764 if (BaseOps1.size() != BaseOps2.size())
2765 return false;
2766 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) {
2767 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I]))
2768 return false;
2769 }
2770 return true;
2771 }
2772
2773 static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
2774 int WidthB, int OffsetB) {
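  // Illustrative example: 4 bytes at offset 0 and 4 bytes at offset 4 do not
  // overlap (0 + 4 <= 4), while 8 bytes at offset 0 and 4 bytes at offset 4
  // do (0 + 8 > 4).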
2775 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2776 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2777 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2778 return LowOffset + LowWidth <= HighOffset;
2779 }
2780
2781 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
2782 const MachineInstr &MIb) const {
2783 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1;
2784 int64_t Offset0, Offset1;
2785 unsigned Dummy0, Dummy1;
2786 bool Offset0IsScalable, Offset1IsScalable;
2787 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable,
2788 Dummy0, &RI) ||
2789 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable,
2790 Dummy1, &RI))
2791 return false;
2792
2793 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1))
2794 return false;
2795
2796 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
2797 // FIXME: Handle ds_read2 / ds_write2.
2798 return false;
2799 }
2800 unsigned Width0 = MIa.memoperands().front()->getSize();
2801 unsigned Width1 = MIb.memoperands().front()->getSize();
2802 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1);
2803 }
2804
2805 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
2806 const MachineInstr &MIb) const {
2807 assert(MIa.mayLoadOrStore() &&
2808 "MIa must load from or modify a memory location");
2809 assert(MIb.mayLoadOrStore() &&
2810 "MIb must load from or modify a memory location");
2811
2812 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
2813 return false;
2814
2815 // XXX - Can we relax this between address spaces?
2816 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
2817 return false;
2818
2819 // TODO: Should we check the address space from the MachineMemOperand? That
2820 // would allow us to distinguish objects we know don't alias based on the
2821 // underlying address space, even if it was lowered to a different one,
2822 // e.g. private accesses lowered to use MUBUF instructions on a scratch
2823 // buffer.
2824 if (isDS(MIa)) {
2825 if (isDS(MIb))
2826 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2827
2828 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
2829 }
2830
2831 if (isMUBUF(MIa) || isMTBUF(MIa)) {
2832 if (isMUBUF(MIb) || isMTBUF(MIb))
2833 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2834
2835 return !isFLAT(MIb) && !isSMRD(MIb);
2836 }
2837
2838 if (isSMRD(MIa)) {
2839 if (isSMRD(MIb))
2840 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2841
2842 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
2843 }
2844
2845 if (isFLAT(MIa)) {
2846 if (isFLAT(MIb))
2847 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2848
2849 return false;
2850 }
2851
2852 return false;
2853 }
2854
2855 static int64_t getFoldableImm(const MachineOperand* MO) {
2856 if (!MO->isReg())
2857     return 0; // Not a foldable immediate.
2858 const MachineFunction *MF = MO->getParent()->getParent()->getParent();
2859 const MachineRegisterInfo &MRI = MF->getRegInfo();
2860 auto Def = MRI.getUniqueVRegDef(MO->getReg());
2861 if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 &&
2862 Def->getOperand(1).isImm())
2863 return Def->getOperand(1).getImm();
2864   return 0; // No foldable immediate found.
2865 }
2866
2867 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
2868 MachineInstr &MI,
2869 LiveVariables *LV) const {
2870 unsigned Opc = MI.getOpcode();
2871 bool IsF16 = false;
2872 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2873 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64;
2874
2875 switch (Opc) {
2876 default:
2877 return nullptr;
2878 case AMDGPU::V_MAC_F16_e64:
2879 case AMDGPU::V_FMAC_F16_e64:
2880 IsF16 = true;
2881 LLVM_FALLTHROUGH;
2882 case AMDGPU::V_MAC_F32_e64:
2883 case AMDGPU::V_FMAC_F32_e64:
2884 break;
2885 case AMDGPU::V_MAC_F16_e32:
2886 case AMDGPU::V_FMAC_F16_e32:
2887 IsF16 = true;
2888 LLVM_FALLTHROUGH;
2889 case AMDGPU::V_MAC_F32_e32:
2890 case AMDGPU::V_FMAC_F32_e32: {
2891 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
2892 AMDGPU::OpName::src0);
2893 const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
2894 if (!Src0->isReg() && !Src0->isImm())
2895 return nullptr;
2896
2897 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
2898 return nullptr;
2899
2900 break;
2901 }
2902 }
2903
2904 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
2905 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
2906 const MachineOperand *Src0Mods =
2907 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
2908 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
2909 const MachineOperand *Src1Mods =
2910 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
2911 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
2912 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
2913 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
2914
2915 if (!Src0Mods && !Src1Mods && !Clamp && !Omod &&
2916 // If we have an SGPR input, we will violate the constant bus restriction.
2917 (ST.getConstantBusLimit(Opc) > 1 ||
2918 !Src0->isReg() ||
2919 !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) {
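    // A foldable immediate in src2 selects the v_madak/v_fmaak forms below,
    // while a foldable immediate in src1 or src0 selects v_madmk/v_fmamk.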
2920 if (auto Imm = getFoldableImm(Src2)) {
2921 unsigned NewOpc =
2922 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32)
2923 : (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32);
2924 if (pseudoToMCOpcode(NewOpc) != -1)
2925 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
2926 .add(*Dst)
2927 .add(*Src0)
2928 .add(*Src1)
2929 .addImm(Imm);
2930 }
2931 unsigned NewOpc =
2932 IsFMA ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32)
2933 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32);
2934 if (auto Imm = getFoldableImm(Src1)) {
2935 if (pseudoToMCOpcode(NewOpc) != -1)
2936 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
2937 .add(*Dst)
2938 .add(*Src0)
2939 .addImm(Imm)
2940 .add(*Src2);
2941 }
2942 if (auto Imm = getFoldableImm(Src0)) {
2943 if (pseudoToMCOpcode(NewOpc) != -1 &&
2944 isOperandLegal(MI, AMDGPU::getNamedOperandIdx(NewOpc,
2945 AMDGPU::OpName::src0), Src1))
2946 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
2947 .add(*Dst)
2948 .add(*Src1)
2949 .addImm(Imm)
2950 .add(*Src2);
2951 }
2952 }
2953
2954 unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16 : AMDGPU::V_FMA_F32)
2955 : (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32);
2956 if (pseudoToMCOpcode(NewOpc) == -1)
2957 return nullptr;
2958
2959 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
2960 .add(*Dst)
2961 .addImm(Src0Mods ? Src0Mods->getImm() : 0)
2962 .add(*Src0)
2963 .addImm(Src1Mods ? Src1Mods->getImm() : 0)
2964 .add(*Src1)
2965 .addImm(0) // Src mods
2966 .add(*Src2)
2967 .addImm(Clamp ? Clamp->getImm() : 0)
2968 .addImm(Omod ? Omod->getImm() : 0);
2969 }
2970
2971 // It's not generally safe to move VALU instructions across these since it will
2972 // start using the register as a base index rather than directly.
2973 // XXX - Why isn't hasSideEffects sufficient for these?
2974 static bool changesVGPRIndexingMode(const MachineInstr &MI) {
2975 switch (MI.getOpcode()) {
2976 case AMDGPU::S_SET_GPR_IDX_ON:
2977 case AMDGPU::S_SET_GPR_IDX_MODE:
2978 case AMDGPU::S_SET_GPR_IDX_OFF:
2979 return true;
2980 default:
2981 return false;
2982 }
2983 }
2984
2985 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
2986 const MachineBasicBlock *MBB,
2987 const MachineFunction &MF) const {
2988   // Skipping the check for SP writes in the base implementation. It was
2989   // apparently added due to compile-time concerns.
2990 //
2991 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops
2992 // but is probably avoidable.
2993
2994 // Copied from base implementation.
2995 // Terminators and labels can't be scheduled around.
2996 if (MI.isTerminator() || MI.isPosition())
2997 return true;
2998
2999 // INLINEASM_BR can jump to another block
3000 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
3001 return true;
3002
3003 // Target-independent instructions do not have an implicit-use of EXEC, even
3004 // when they operate on VGPRs. Treating EXEC modifications as scheduling
3005 // boundaries prevents incorrect movements of such instructions.
3006
3007 // TODO: Don't treat setreg with known constant that only changes MODE as
3008 // barrier.
3009 return MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
3010 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
3011 MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
3012 changesVGPRIndexingMode(MI);
3013 }
3014
3015 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const {
3016 return Opcode == AMDGPU::DS_ORDERED_COUNT ||
3017 Opcode == AMDGPU::DS_GWS_INIT ||
3018 Opcode == AMDGPU::DS_GWS_SEMA_V ||
3019 Opcode == AMDGPU::DS_GWS_SEMA_BR ||
3020 Opcode == AMDGPU::DS_GWS_SEMA_P ||
3021 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL ||
3022 Opcode == AMDGPU::DS_GWS_BARRIER;
3023 }
3024
3025 bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) {
3026   // Skip the full operand and register alias search that modifiesRegister
3027   // does. Only a handful of instructions touch this register, it is only an
3028   // implicit def, and it doesn't alias any other registers.
3029 if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) {
3030 for (; ImpDef && *ImpDef; ++ImpDef) {
3031 if (*ImpDef == AMDGPU::MODE)
3032 return true;
3033 }
3034 }
3035
3036 return false;
3037 }
3038
3039 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const {
3040 unsigned Opcode = MI.getOpcode();
3041
3042 if (MI.mayStore() && isSMRD(MI))
3043 return true; // scalar store or atomic
3044
3045 // This will terminate the function when other lanes may need to continue.
3046 if (MI.isReturn())
3047 return true;
3048
3049 // These instructions cause shader I/O that may cause hardware lockups
3050 // when executed with an empty EXEC mask.
3051 //
3052 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when
3053 // EXEC = 0, but checking for that case here seems not worth it
3054 // given the typical code patterns.
3055 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT ||
3056 Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE ||
3057 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP ||
3058 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER)
3059 return true;
3060
3061 if (MI.isCall() || MI.isInlineAsm())
3062 return true; // conservative assumption
3063
3064 // A mode change is a scalar operation that influences vector instructions.
3065 if (modifiesModeRegister(MI))
3066 return true;
3067
3068 // These are like SALU instructions in terms of effects, so it's questionable
3069 // whether we should return true for those.
3070 //
3071 // However, executing them with EXEC = 0 causes them to operate on undefined
3072 // data, which we avoid by returning true here.
3073 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || Opcode == AMDGPU::V_READLANE_B32)
3074 return true;
3075
3076 return false;
3077 }
3078
3079 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI,
3080 const MachineInstr &MI) const {
3081 if (MI.isMetaInstruction())
3082 return false;
3083
3084 // This won't read exec if this is an SGPR->SGPR copy.
3085 if (MI.isCopyLike()) {
3086 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg()))
3087 return true;
3088
3089 // Make sure this isn't copying exec as a normal operand
3090 return MI.readsRegister(AMDGPU::EXEC, &RI);
3091 }
3092
3093 // Make a conservative assumption about the callee.
3094 if (MI.isCall())
3095 return true;
3096
3097 // Be conservative with any unhandled generic opcodes.
3098 if (!isTargetSpecificOpcode(MI.getOpcode()))
3099 return true;
3100
3101 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI);
3102 }
3103
3104 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
3105 switch (Imm.getBitWidth()) {
3106 case 1: // This likely will be a condition code mask.
3107 return true;
3108
3109 case 32:
3110 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
3111 ST.hasInv2PiInlineImm());
3112 case 64:
3113 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
3114 ST.hasInv2PiInlineImm());
3115 case 16:
3116 return ST.has16BitInsts() &&
3117 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(),
3118 ST.hasInv2PiInlineImm());
3119 default:
3120 llvm_unreachable("invalid bitwidth");
3121 }
3122 }
3123
3124 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
3125 uint8_t OperandType) const {
3126 if (!MO.isImm() ||
3127 OperandType < AMDGPU::OPERAND_SRC_FIRST ||
3128 OperandType > AMDGPU::OPERAND_SRC_LAST)
3129 return false;
3130
3131 // MachineOperand provides no way to tell the true operand size, since it only
3132 // records a 64-bit value. We need to know the size to determine if a 32-bit
3133 // floating point immediate bit pattern is legal for an integer immediate. It
3134 // would be for any 32-bit integer operand, but would not be for a 64-bit one.
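  // For example, 0x3F800000 (the bit pattern of 1.0f) is an inline constant
  // for a 32-bit operand, but the same value as a 64-bit immediate is not
  // inlinable for a 64-bit operand, where 1.0 is encoded as 0x3FF0000000000000.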
3135
3136 int64_t Imm = MO.getImm();
3137 switch (OperandType) {
3138 case AMDGPU::OPERAND_REG_IMM_INT32:
3139 case AMDGPU::OPERAND_REG_IMM_FP32:
3140 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
3141 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
3142 case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
3143 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: {
3144 int32_t Trunc = static_cast<int32_t>(Imm);
3145 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
3146 }
3147 case AMDGPU::OPERAND_REG_IMM_INT64:
3148 case AMDGPU::OPERAND_REG_IMM_FP64:
3149 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
3150 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
3151 return AMDGPU::isInlinableLiteral64(MO.getImm(),
3152 ST.hasInv2PiInlineImm());
3153 case AMDGPU::OPERAND_REG_IMM_INT16:
3154 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
3155 case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
3156 // We would expect inline immediates to not be concerned with an integer/fp
3157 // distinction. However, in the case of 16-bit integer operations, the
3158 // "floating point" values appear to not work. It seems read the low 16-bits
3159 // of 32-bit immediates, which happens to always work for the integer
3160 // values.
3161 //
3162 // See llvm bugzilla 46302.
3163 //
3164 // TODO: Theoretically we could use op-sel to use the high bits of the
3165 // 32-bit FP values.
3166 return AMDGPU::isInlinableIntLiteral(Imm);
3167 case AMDGPU::OPERAND_REG_IMM_V2INT16:
3168 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
3169 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
3170 // This suffers the same problem as the scalar 16-bit cases.
3171 return AMDGPU::isInlinableIntLiteralV216(Imm);
3172 case AMDGPU::OPERAND_REG_IMM_FP16:
3173 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
3174 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: {
3175 if (isInt<16>(Imm) || isUInt<16>(Imm)) {
3176 // A few special case instructions have 16-bit operands on subtargets
3177 // where 16-bit instructions are not legal.
3178 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle
3179 // constants in these cases.
3180 int16_t Trunc = static_cast<int16_t>(Imm);
3181 return ST.has16BitInsts() &&
3182 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
3183 }
3184
3185 return false;
3186 }
3187 case AMDGPU::OPERAND_REG_IMM_V2FP16:
3188 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
3189 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
3190 uint32_t Trunc = static_cast<uint32_t>(Imm);
3191 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm());
3192 }
3193 default:
3194 llvm_unreachable("invalid bitwidth");
3195 }
3196 }
3197
3198 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO,
3199 const MCOperandInfo &OpInfo) const {
3200 switch (MO.getType()) {
3201 case MachineOperand::MO_Register:
3202 return false;
3203 case MachineOperand::MO_Immediate:
3204 return !isInlineConstant(MO, OpInfo);
3205 case MachineOperand::MO_FrameIndex:
3206 case MachineOperand::MO_MachineBasicBlock:
3207 case MachineOperand::MO_ExternalSymbol:
3208 case MachineOperand::MO_GlobalAddress:
3209 case MachineOperand::MO_MCSymbol:
3210 return true;
3211 default:
3212 llvm_unreachable("unexpected operand type");
3213 }
3214 }
3215
3216 static bool compareMachineOp(const MachineOperand &Op0,
3217 const MachineOperand &Op1) {
3218 if (Op0.getType() != Op1.getType())
3219 return false;
3220
3221 switch (Op0.getType()) {
3222 case MachineOperand::MO_Register:
3223 return Op0.getReg() == Op1.getReg();
3224 case MachineOperand::MO_Immediate:
3225 return Op0.getImm() == Op1.getImm();
3226 default:
3227 llvm_unreachable("Didn't expect to be comparing these operand types");
3228 }
3229 }
3230
3231 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
3232 const MachineOperand &MO) const {
3233 const MCInstrDesc &InstDesc = MI.getDesc();
3234 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
3235
3236 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
3237
3238 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
3239 return true;
3240
3241 if (OpInfo.RegClass < 0)
3242 return false;
3243
3244 const MachineFunction *MF = MI.getParent()->getParent();
3245 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3246
3247 if (MO.isImm() && isInlineConstant(MO, OpInfo)) {
3248 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() &&
3249 OpNo ==(unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(),
3250 AMDGPU::OpName::src2))
3251 return false;
3252 return RI.opCanUseInlineConstant(OpInfo.OperandType);
3253 }
3254
3255 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))
3256 return false;
3257
3258 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo))
3259 return true;
3260
3261 return ST.hasVOP3Literal();
3262 }
3263
3264 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
3265 int Op32 = AMDGPU::getVOPe32(Opcode);
3266 if (Op32 == -1)
3267 return false;
3268
3269 return pseudoToMCOpcode(Op32) != -1;
3270 }
3271
3272 bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
3273 // The src0_modifier operand is present on all instructions
3274 // that have modifiers.
3275
3276 return AMDGPU::getNamedOperandIdx(Opcode,
3277 AMDGPU::OpName::src0_modifiers) != -1;
3278 }
3279
3280 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
3281 unsigned OpName) const {
3282 const MachineOperand *Mods = getNamedOperand(MI, OpName);
3283 return Mods && Mods->getImm();
3284 }
3285
3286 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
3287 return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
3288 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
3289 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) ||
3290 hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
3291 hasModifiersSet(MI, AMDGPU::OpName::omod);
3292 }
3293
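// A shrink candidate is an e64 (VOP3-encoded) instruction that can be
// re-encoded in the 32-bit VOP1/VOP2/VOPC form, e.g.
//   %2:vgpr_32 = V_ADD_F32_e64 0, %0, 0, %1, 0, 0
// can become
//   %2:vgpr_32 = V_ADD_F32_e32 %0, %1
// provided no source or output modifiers are used and src1 is a VGPR. The MIR
// above is only illustrative; the actual constraints are checked below.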
3294 bool SIInstrInfo::canShrink(const MachineInstr &MI,
3295 const MachineRegisterInfo &MRI) const {
3296 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3297 // Can't shrink instruction with three operands.
3298 // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
3299 // a special case for it. It can only be shrunk if the third operand
3300 // is vcc, and src0_modifiers and src1_modifiers are not set.
3301 // We should handle this the same way we handle vopc, by adding
3302 // a register allocation hint pre-regalloc and then do the shrinking
3303 // post-regalloc.
3304 if (Src2) {
3305 switch (MI.getOpcode()) {
3306 default: return false;
3307
3308 case AMDGPU::V_ADDC_U32_e64:
3309 case AMDGPU::V_SUBB_U32_e64:
3310 case AMDGPU::V_SUBBREV_U32_e64: {
3311 const MachineOperand *Src1
3312 = getNamedOperand(MI, AMDGPU::OpName::src1);
3313 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
3314 return false;
3315 // Additional verification is needed for sdst/src2.
3316 return true;
3317 }
3318 case AMDGPU::V_MAC_F32_e64:
3319 case AMDGPU::V_MAC_F16_e64:
3320 case AMDGPU::V_FMAC_F32_e64:
3321 case AMDGPU::V_FMAC_F16_e64:
3322 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
3323 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
3324 return false;
3325 break;
3326
3327 case AMDGPU::V_CNDMASK_B32_e64:
3328 break;
3329 }
3330 }
3331
3332 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3333 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
3334 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
3335 return false;
3336
3337 // We don't need to check src0, all input types are legal, so just make sure
3338 // src0 isn't using any modifiers.
3339 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
3340 return false;
3341
3342 // Can it be shrunk to a valid 32 bit opcode?
3343 if (!hasVALU32BitEncoding(MI.getOpcode()))
3344 return false;
3345
3346 // Check output modifiers
3347 return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
3348 !hasModifiersSet(MI, AMDGPU::OpName::clamp);
3349 }
3350
3351 // Set VCC operand with all flags from \p Orig, except for setting it as
3352 // implicit.
3353 static void copyFlagsToImplicitVCC(MachineInstr &MI,
3354 const MachineOperand &Orig) {
3355
3356 for (MachineOperand &Use : MI.implicit_operands()) {
3357 if (Use.isUse() &&
3358 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) {
3359 Use.setIsUndef(Orig.isUndef());
3360 Use.setIsKill(Orig.isKill());
3361 return;
3362 }
3363 }
3364 }
3365
3366 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
3367 unsigned Op32) const {
3368 MachineBasicBlock *MBB = MI.getParent();
3369 MachineInstrBuilder Inst32 =
3370 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32))
3371 .setMIFlags(MI.getFlags());
3372
3373 // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
3374 // For VOPC instructions, this is replaced by an implicit def of vcc.
3375 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
3376 if (Op32DstIdx != -1) {
3377 // dst
3378 Inst32.add(MI.getOperand(0));
3379 } else {
3380 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) ||
3381 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) &&
3382 "Unexpected case");
3383 }
3384
3385 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0));
3386
3387 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3388 if (Src1)
3389 Inst32.add(*Src1);
3390
3391 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3392
3393 if (Src2) {
3394 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
3395 if (Op32Src2Idx != -1) {
3396 Inst32.add(*Src2);
3397 } else {
3398 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
3399 // replaced with an implicit read of vcc. This was already added
3400 // during the initial BuildMI, so find it to preserve the flags.
3401 copyFlagsToImplicitVCC(*Inst32, *Src2);
3402 }
3403 }
3404
3405 return Inst32;
3406 }
3407
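// Background: each VALU instruction may read only a limited number of values
// over the scalar (constant) bus per issue -- SGPRs, literal constants, M0 and
// VCC all count. Subtargets before GFX10 allow a single such read; newer
// subtargets report a higher limit via ST.getConstantBusLimit().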
3408 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
3409 const MachineOperand &MO,
3410 const MCOperandInfo &OpInfo) const {
3411 // Literal constants use the constant bus.
3412 //if (isLiteralConstantLike(MO, OpInfo))
3413 // return true;
3414 if (MO.isImm())
3415 return !isInlineConstant(MO, OpInfo);
3416
3417 if (!MO.isReg())
3418 return true; // Misc other operands like FrameIndex
3419
3420 if (!MO.isUse())
3421 return false;
3422
3423 if (Register::isVirtualRegister(MO.getReg()))
3424 return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
3425
3426 // Null is free
3427 if (MO.getReg() == AMDGPU::SGPR_NULL)
3428 return false;
3429
3430 // SGPRs use the constant bus
3431 if (MO.isImplicit()) {
3432 return MO.getReg() == AMDGPU::M0 ||
3433 MO.getReg() == AMDGPU::VCC ||
3434 MO.getReg() == AMDGPU::VCC_LO;
3435 } else {
3436 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) ||
3437 AMDGPU::SReg_64RegClass.contains(MO.getReg());
3438 }
3439 }
3440
3441 static Register findImplicitSGPRRead(const MachineInstr &MI) {
3442 for (const MachineOperand &MO : MI.implicit_operands()) {
3443 // We only care about reads.
3444 if (MO.isDef())
3445 continue;
3446
3447 switch (MO.getReg()) {
3448 case AMDGPU::VCC:
3449 case AMDGPU::VCC_LO:
3450 case AMDGPU::VCC_HI:
3451 case AMDGPU::M0:
3452 case AMDGPU::FLAT_SCR:
3453 return MO.getReg();
3454
3455 default:
3456 break;
3457 }
3458 }
3459
3460 return AMDGPU::NoRegister;
3461 }
3462
3463 static bool shouldReadExec(const MachineInstr &MI) {
3464 if (SIInstrInfo::isVALU(MI)) {
3465 switch (MI.getOpcode()) {
3466 case AMDGPU::V_READLANE_B32:
3467 case AMDGPU::V_READLANE_B32_gfx6_gfx7:
3468 case AMDGPU::V_READLANE_B32_gfx10:
3469 case AMDGPU::V_READLANE_B32_vi:
3470 case AMDGPU::V_WRITELANE_B32:
3471 case AMDGPU::V_WRITELANE_B32_gfx6_gfx7:
3472 case AMDGPU::V_WRITELANE_B32_gfx10:
3473 case AMDGPU::V_WRITELANE_B32_vi:
3474 return false;
3475 }
3476
3477 return true;
3478 }
3479
3480 if (MI.isPreISelOpcode() ||
3481 SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
3482 SIInstrInfo::isSALU(MI) ||
3483 SIInstrInfo::isSMRD(MI))
3484 return false;
3485
3486 return true;
3487 }
3488
3489 static bool isSubRegOf(const SIRegisterInfo &TRI,
3490 const MachineOperand &SuperVec,
3491 const MachineOperand &SubReg) {
3492 if (Register::isPhysicalRegister(SubReg.getReg()))
3493 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
3494
3495 return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
3496 SubReg.getReg() == SuperVec.getReg();
3497 }
3498
3499 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
3500 StringRef &ErrInfo) const {
3501 uint16_t Opcode = MI.getOpcode();
3502 if (SIInstrInfo::isGenericOpcode(MI.getOpcode()))
3503 return true;
3504
3505 const MachineFunction *MF = MI.getParent()->getParent();
3506 const MachineRegisterInfo &MRI = MF->getRegInfo();
3507
3508 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
3509 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
3510 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
3511
3512 // Make sure the number of operands is correct.
3513 const MCInstrDesc &Desc = get(Opcode);
3514 if (!Desc.isVariadic() &&
3515 Desc.getNumOperands() != MI.getNumExplicitOperands()) {
3516 ErrInfo = "Instruction has wrong number of operands.";
3517 return false;
3518 }
3519
3520 if (MI.isInlineAsm()) {
3521 // Verify register classes for inlineasm constraints.
3522 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
3523 I != E; ++I) {
3524 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
3525 if (!RC)
3526 continue;
3527
3528 const MachineOperand &Op = MI.getOperand(I);
3529 if (!Op.isReg())
3530 continue;
3531
3532 Register Reg = Op.getReg();
3533 if (!Register::isVirtualRegister(Reg) && !RC->contains(Reg)) {
3534 ErrInfo = "inlineasm operand has incorrect register class.";
3535 return false;
3536 }
3537 }
3538
3539 return true;
3540 }
3541
3542 if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) {
3543 ErrInfo = "missing memory operand from MIMG instruction.";
3544 return false;
3545 }
3546
3547 // Make sure the register classes are correct.
3548 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
3549 if (MI.getOperand(i).isFPImm()) {
3550 ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
3551 "all fp values to integers.";
3552 return false;
3553 }
3554
3555 int RegClass = Desc.OpInfo[i].RegClass;
3556
3557 switch (Desc.OpInfo[i].OperandType) {
3558 case MCOI::OPERAND_REGISTER:
3559 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
3560 ErrInfo = "Illegal immediate value for operand.";
3561 return false;
3562 }
3563 break;
3564 case AMDGPU::OPERAND_REG_IMM_INT32:
3565 case AMDGPU::OPERAND_REG_IMM_FP32:
3566 break;
3567 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
3568 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
3569 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
3570 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
3571 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
3572 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
3573 case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
3574 case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
3575 case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
3576 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: {
3577 const MachineOperand &MO = MI.getOperand(i);
3578 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
3579 ErrInfo = "Illegal immediate value for operand.";
3580 return false;
3581 }
3582 break;
3583 }
3584 case MCOI::OPERAND_IMMEDIATE:
3585 case AMDGPU::OPERAND_KIMM32:
3586 // Check if this operand is an immediate.
3587 // FrameIndex operands will be replaced by immediates, so they are
3588 // allowed.
3589 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
3590 ErrInfo = "Expected immediate, but got non-immediate";
3591 return false;
3592 }
3593 LLVM_FALLTHROUGH;
3594 default:
3595 continue;
3596 }
3597
3598 if (!MI.getOperand(i).isReg())
3599 continue;
3600
3601 if (RegClass != -1) {
3602 Register Reg = MI.getOperand(i).getReg();
3603 if (Reg == AMDGPU::NoRegister || Register::isVirtualRegister(Reg))
3604 continue;
3605
3606 const TargetRegisterClass *RC = RI.getRegClass(RegClass);
3607 if (!RC->contains(Reg)) {
3608 ErrInfo = "Operand has incorrect register class.";
3609 return false;
3610 }
3611 }
3612 }
3613
3614 // Verify SDWA
3615 if (isSDWA(MI)) {
3616 if (!ST.hasSDWA()) {
3617 ErrInfo = "SDWA is not supported on this target";
3618 return false;
3619 }
3620
3621 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
3622
3623 const int OpIndices[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx };
3624
3625 for (int OpIdx: OpIndices) {
3626 if (OpIdx == -1)
3627 continue;
3628 const MachineOperand &MO = MI.getOperand(OpIdx);
3629
3630 if (!ST.hasSDWAScalar()) {
3631 // Only VGPRS on VI
3632 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
3633 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
3634 return false;
3635 }
3636 } else {
3637 // No immediates on GFX9
3638 if (!MO.isReg()) {
3639 ErrInfo = "Only reg allowed as operands in SDWA instructions on GFX9";
3640 return false;
3641 }
3642 }
3643 }
3644
3645 if (!ST.hasSDWAOmod()) {
3646 // No omod allowed on VI
3647 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
3648 if (OMod != nullptr &&
3649 (!OMod->isImm() || OMod->getImm() != 0)) {
3650 ErrInfo = "OMod not allowed in SDWA instructions on VI";
3651 return false;
3652 }
3653 }
3654
3655 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
3656 if (isVOPC(BasicOpcode)) {
3657 if (!ST.hasSDWASdst() && DstIdx != -1) {
3658 // Only vcc allowed as dst on VI for VOPC
3659 const MachineOperand &Dst = MI.getOperand(DstIdx);
3660 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
3661 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
3662 return false;
3663 }
3664 } else if (!ST.hasSDWAOutModsVOPC()) {
3665 // No clamp allowed on GFX9 for VOPC
3666 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
3667 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
3668 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI";
3669 return false;
3670 }
3671
3672 // No omod allowed on GFX9 for VOPC
3673 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
3674 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
3675 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI";
3676 return false;
3677 }
3678 }
3679 }
3680
3681 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
3682 if (DstUnused && DstUnused->isImm() &&
3683 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
3684 const MachineOperand &Dst = MI.getOperand(DstIdx);
3685 if (!Dst.isReg() || !Dst.isTied()) {
3686 ErrInfo = "Dst register should have tied register";
3687 return false;
3688 }
3689
3690 const MachineOperand &TiedMO =
3691 MI.getOperand(MI.findTiedOperandIdx(DstIdx));
3692 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
3693 ErrInfo =
3694 "Dst register should be tied to implicit use of preserved register";
3695 return false;
3696 } else if (Register::isPhysicalRegister(TiedMO.getReg()) &&
3697 Dst.getReg() != TiedMO.getReg()) {
3698 ErrInfo = "Dst register should use same physical register as preserved";
3699 return false;
3700 }
3701 }
3702 }
3703
3704 // Verify MIMG
3705 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
3706 // Ensure that the return type used is large enough for all the options
3707 // being used. TFE/LWE require an extra result register.
3708 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
3709 if (DMask) {
3710 uint64_t DMaskImm = DMask->getImm();
3711 uint32_t RegCount =
3712 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
3713 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
3714 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
3715 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);
3716
3717 // Adjust for packed 16 bit values
3718 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
3719 RegCount >>= 1;
3720
3721 // Adjust if using LWE or TFE
3722 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
3723 RegCount += 1;
3724
3725 const uint32_t DstIdx =
3726 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
3727 const MachineOperand &Dst = MI.getOperand(DstIdx);
3728 if (Dst.isReg()) {
3729 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
3730 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
3731 if (RegCount > DstSize) {
3732 ErrInfo = "MIMG instruction returns too many registers for dst "
3733 "register class";
3734 return false;
3735 }
3736 }
3737 }
3738 }
3739
3740 // Verify VOP*. Ignore multiple sgpr operands on writelane.
3741 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32
3742 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
3743 // Only look at the true operands. Only a real operand can use the constant
3744 // bus, and we don't want to check pseudo-operands like the source modifier
3745 // flags.
3746 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
3747
3748 unsigned ConstantBusCount = 0;
3749 unsigned LiteralCount = 0;
3750
3751 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
3752 ++ConstantBusCount;
3753
3754 SmallVector<Register, 2> SGPRsUsed;
3755 Register SGPRUsed = findImplicitSGPRRead(MI);
3756 if (SGPRUsed != AMDGPU::NoRegister) {
3757 ++ConstantBusCount;
3758 SGPRsUsed.push_back(SGPRUsed);
3759 }
3760
3761 for (int OpIdx : OpIndices) {
3762 if (OpIdx == -1)
3763 break;
3764 const MachineOperand &MO = MI.getOperand(OpIdx);
3765 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
3766 if (MO.isReg()) {
3767 SGPRUsed = MO.getReg();
3768 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) {
3769 return !RI.regsOverlap(SGPRUsed, SGPR);
3770 })) {
3771 ++ConstantBusCount;
3772 SGPRsUsed.push_back(SGPRUsed);
3773 }
3774 } else {
3775 ++ConstantBusCount;
3776 ++LiteralCount;
3777 }
3778 }
3779 }
3780 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
3781 // v_writelane_b32 is an exception to the constant bus restriction:
3782 // vsrc0 can be an SGPR, constant or m0, and the lane select an SGPR, m0 or inline constant.
3783 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) &&
3784 Opcode != AMDGPU::V_WRITELANE_B32) {
3785 ErrInfo = "VOP* instruction violates constant bus restriction";
3786 return false;
3787 }
3788
3789 if (isVOP3(MI) && LiteralCount) {
3790 if (!ST.hasVOP3Literal()) {
3791 ErrInfo = "VOP3 instruction uses literal";
3792 return false;
3793 }
3794 if (LiteralCount > 1) {
3795 ErrInfo = "VOP3 instruction uses more than one literal";
3796 return false;
3797 }
3798 }
3799 }
3800
3801 // Special case for writelane - this can break the multiple constant bus rule,
3802 // but still can't use more than one SGPR register
3803 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
3804 unsigned SGPRCount = 0;
3805 Register SGPRUsed = AMDGPU::NoRegister;
3806
3807 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) {
3808 if (OpIdx == -1)
3809 break;
3810
3811 const MachineOperand &MO = MI.getOperand(OpIdx);
3812
3813 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
3814 if (MO.isReg() && MO.getReg() != AMDGPU::M0) {
3815 if (MO.getReg() != SGPRUsed)
3816 ++SGPRCount;
3817 SGPRUsed = MO.getReg();
3818 }
3819 }
3820 if (SGPRCount > ST.getConstantBusLimit(Opcode)) {
3821 ErrInfo = "WRITELANE instruction violates constant bus restriction";
3822 return false;
3823 }
3824 }
3825 }
3826
3827 // Verify misc. restrictions on specific instructions.
3828 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
3829 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
3830 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
3831 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
3832 const MachineOperand &Src2 = MI.getOperand(Src2Idx);
3833 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
3834 if (!compareMachineOp(Src0, Src1) &&
3835 !compareMachineOp(Src0, Src2)) {
3836 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
3837 return false;
3838 }
3839 }
3840 }
3841
3842 if (isSOP2(MI) || isSOPC(MI)) {
3843 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
3844 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
3845 unsigned Immediates = 0;
3846
3847 if (!Src0.isReg() &&
3848 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType))
3849 Immediates++;
3850 if (!Src1.isReg() &&
3851 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType))
3852 Immediates++;
3853
3854 if (Immediates > 1) {
3855 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants";
3856 return false;
3857 }
3858 }
3859
3860 if (isSOPK(MI)) {
3861 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16);
3862 if (Desc.isBranch()) {
3863 if (!Op->isMBB()) {
3864 ErrInfo = "invalid branch target for SOPK instruction";
3865 return false;
3866 }
3867 } else {
3868 uint64_t Imm = Op->getImm();
3869 if (sopkIsZext(MI)) {
3870 if (!isUInt<16>(Imm)) {
3871 ErrInfo = "invalid immediate for SOPK instruction";
3872 return false;
3873 }
3874 } else {
3875 if (!isInt<16>(Imm)) {
3876 ErrInfo = "invalid immediate for SOPK instruction";
3877 return false;
3878 }
3879 }
3880 }
3881 }
3882
3883 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
3884 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
3885 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
3886 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
3887 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
3888 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
3889
3890 const unsigned StaticNumOps = Desc.getNumOperands() +
3891 Desc.getNumImplicitUses();
3892 const unsigned NumImplicitOps = IsDst ? 2 : 1;
3893
3894 // Allow additional implicit operands. This allows a fixup done by the post
3895 // RA scheduler where the main implicit operand is killed and implicit-defs
3896 // are added for sub-registers that remain live after this instruction.
3897 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
3898 ErrInfo = "missing implicit register operands";
3899 return false;
3900 }
3901
3902 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
3903 if (IsDst) {
3904 if (!Dst->isUse()) {
3905 ErrInfo = "v_movreld_b32 vdst should be a use operand";
3906 return false;
3907 }
3908
3909 unsigned UseOpIdx;
3910 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
3911 UseOpIdx != StaticNumOps + 1) {
3912 ErrInfo = "movrel implicit operands should be tied";
3913 return false;
3914 }
3915 }
3916
3917 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
3918 const MachineOperand &ImpUse
3919 = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
3920 if (!ImpUse.isReg() || !ImpUse.isUse() ||
3921 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
3922 ErrInfo = "src0 should be subreg of implicit vector use";
3923 return false;
3924 }
3925 }
3926
3927 // Make sure we aren't losing exec uses in the td files. This mostly requires
3928 // being careful when using let Uses to try to add other use registers.
3929 if (shouldReadExec(MI)) {
3930 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
3931 ErrInfo = "VALU instruction does not implicitly read exec mask";
3932 return false;
3933 }
3934 }
3935
3936 if (isSMRD(MI)) {
3937 if (MI.mayStore()) {
3938 // The register offset form of scalar stores may only use m0 as the
3939 // soffset register.
3940 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
3941 if (Soff && Soff->getReg() != AMDGPU::M0) {
3942 ErrInfo = "scalar stores must use m0 as offset register";
3943 return false;
3944 }
3945 }
3946 }
3947
3948 if (isFLAT(MI) && !MF->getSubtarget<GCNSubtarget>().hasFlatInstOffsets()) {
3949 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
3950 if (Offset->getImm() != 0) {
3951 ErrInfo = "subtarget does not support offsets in flat instructions";
3952 return false;
3953 }
3954 }
3955
3956 if (isMIMG(MI)) {
3957 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim);
3958 if (DimOp) {
3959 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode,
3960 AMDGPU::OpName::vaddr0);
3961 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
3962 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode);
3963 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
3964 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
3965 const AMDGPU::MIMGDimInfo *Dim =
3966 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm());
3967
3968 if (!Dim) {
3969 ErrInfo = "dim is out of range";
3970 return false;
3971 }
3972
3973 bool IsA16 = false;
3974 if (ST.hasR128A16()) {
3975 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128);
3976 IsA16 = R128A16->getImm() != 0;
3977 } else if (ST.hasGFX10A16()) {
3978 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16);
3979 IsA16 = A16->getImm() != 0;
3980 }
3981
3982 bool PackDerivatives = IsA16 || BaseOpcode->G16;
3983 bool IsNSA = SRsrcIdx - VAddr0Idx > 1;
3984
3985 unsigned AddrWords = BaseOpcode->NumExtraArgs;
3986 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
3987 (BaseOpcode->LodOrClampOrMip ? 1 : 0);
3988 if (IsA16)
3989 AddrWords += (AddrComponents + 1) / 2;
3990 else
3991 AddrWords += AddrComponents;
3992
3993 if (BaseOpcode->Gradients) {
3994 if (PackDerivatives)
3995 // There are two gradients per coordinate, we pack them separately.
3996 // For the 3d case, we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
3997 AddrWords += (Dim->NumGradients / 2 + 1) / 2 * 2;
3998 else
3999 AddrWords += Dim->NumGradients;
4000 }
4001
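      // For example, an unpacked image_sample_d of a 2D surface needs
      // 2 coordinate words plus 4 gradient words = 6 address words
      // (approximate illustration; the exact count comes from the MIMG tables).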
4002 unsigned VAddrWords;
4003 if (IsNSA) {
4004 VAddrWords = SRsrcIdx - VAddr0Idx;
4005 } else {
4006 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx);
4007 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32;
4008 if (AddrWords > 8)
4009 AddrWords = 16;
4010 else if (AddrWords > 4)
4011 AddrWords = 8;
4012 else if (AddrWords == 4)
4013 AddrWords = 4;
4014 else if (AddrWords == 3)
4015 AddrWords = 3;
4016 }
4017
4018 if (VAddrWords != AddrWords) {
4019 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords
4020 << " but got " << VAddrWords << "\n");
4021 ErrInfo = "bad vaddr size";
4022 return false;
4023 }
4024 }
4025 }
4026
4027 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl);
4028 if (DppCt) {
4029 using namespace AMDGPU::DPP;
4030
4031 unsigned DC = DppCt->getImm();
4032 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
4033 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
4034 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
4035 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
4036 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
4037 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) ||
4038 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) {
4039 ErrInfo = "Invalid dpp_ctrl value";
4040 return false;
4041 }
4042 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 &&
4043 ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
4044 ErrInfo = "Invalid dpp_ctrl value: "
4045 "wavefront shifts are not supported on GFX10+";
4046 return false;
4047 }
4048 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 &&
4049 ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
4050 ErrInfo = "Invalid dpp_ctrl value: "
4051 "broadcasts are not supported on GFX10+";
4052 return false;
4053 }
4054 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST &&
4055 ST.getGeneration() < AMDGPUSubtarget::GFX10) {
4056 ErrInfo = "Invalid dpp_ctrl value: "
4057 "row_share and row_xmask are not supported before GFX10";
4058 return false;
4059 }
4060 }
4061
4062 return true;
4063 }
4064
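// Maps a scalar (SALU) opcode to the VALU opcode used when the instruction has
// to be moved to the vector unit, e.g. because one of its operands was
// legalized into a VGPR. INSTRUCTION_LIST_END means there is no direct vector
// equivalent and the caller must lower the operation differently.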
4065 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
4066 switch (MI.getOpcode()) {
4067 default: return AMDGPU::INSTRUCTION_LIST_END;
4068 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
4069 case AMDGPU::COPY: return AMDGPU::COPY;
4070 case AMDGPU::PHI: return AMDGPU::PHI;
4071 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
4072 case AMDGPU::WQM: return AMDGPU::WQM;
4073 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM;
4074 case AMDGPU::WWM: return AMDGPU::WWM;
4075 case AMDGPU::S_MOV_B32: {
4076 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4077 return MI.getOperand(1).isReg() ||
4078 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
4079 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
4080 }
4081 case AMDGPU::S_ADD_I32:
4082 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_I32_e32;
4083 case AMDGPU::S_ADDC_U32:
4084 return AMDGPU::V_ADDC_U32_e32;
4085 case AMDGPU::S_SUB_I32:
4086 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32;
4087 // FIXME: These are not consistently handled, and selected when the carry is
4088 // used.
4089 case AMDGPU::S_ADD_U32:
4090 return AMDGPU::V_ADD_I32_e32;
4091 case AMDGPU::S_SUB_U32:
4092 return AMDGPU::V_SUB_I32_e32;
4093 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
4094 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32;
4095 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32;
4096 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32;
4097 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
4098 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
4099 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
4100 case AMDGPU::S_XNOR_B32:
4101 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
4102 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
4103 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
4104 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
4105 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
4106 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
4107 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
4108 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
4109 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
4110 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
4111 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
4112 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
4113 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
4114 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
4115 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
4116 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
4117 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
4118 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
4119 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
4120 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
4121 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
4122 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
4123 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
4124 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
4125 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
4126 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
4127 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
4128 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
4129 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
4130 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
4131 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
4132 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
4133 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
4134 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
4135 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
4136 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
4137 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
4138 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
4139 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
4140 }
4141 llvm_unreachable(
4142 "Unexpected scalar opcode without corresponding vector one!");
4143 }
4144
4145 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
4146 unsigned OpNo) const {
4147 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4148 const MCInstrDesc &Desc = get(MI.getOpcode());
4149 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
4150 Desc.OpInfo[OpNo].RegClass == -1) {
4151 Register Reg = MI.getOperand(OpNo).getReg();
4152
4153 if (Register::isVirtualRegister(Reg))
4154 return MRI.getRegClass(Reg);
4155 return RI.getPhysRegClass(Reg);
4156 }
4157
4158 unsigned RCID = Desc.OpInfo[OpNo].RegClass;
4159 return RI.getRegClass(RCID);
4160 }
4161
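// Legalizes an operand by materializing it in a register of the class the
// instruction expects: a COPY for register operands, or an S_MOV/V_MOV of the
// appropriate width for immediates, then rewriting the operand to use the
// newly created virtual register.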
4162 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
4163 MachineBasicBlock::iterator I = MI;
4164 MachineBasicBlock *MBB = MI.getParent();
4165 MachineOperand &MO = MI.getOperand(OpIdx);
4166 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
4167 const SIRegisterInfo *TRI =
4168 static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
4169 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
4170 const TargetRegisterClass *RC = RI.getRegClass(RCID);
4171 unsigned Size = TRI->getRegSizeInBits(*RC);
4172 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32;
4173 if (MO.isReg())
4174 Opcode = AMDGPU::COPY;
4175 else if (RI.isSGPRClass(RC))
4176 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
4177
4178 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
4179 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
4180 VRC = &AMDGPU::VReg_64RegClass;
4181 else
4182 VRC = &AMDGPU::VGPR_32RegClass;
4183
4184 Register Reg = MRI.createVirtualRegister(VRC);
4185 DebugLoc DL = MBB->findDebugLoc(I);
4186 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
4187 MO.ChangeToRegister(Reg, false);
4188 }
4189
4190 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
4191 MachineRegisterInfo &MRI,
4192 MachineOperand &SuperReg,
4193 const TargetRegisterClass *SuperRC,
4194 unsigned SubIdx,
4195 const TargetRegisterClass *SubRC)
4196 const {
4197 MachineBasicBlock *MBB = MI->getParent();
4198 DebugLoc DL = MI->getDebugLoc();
4199 Register SubReg = MRI.createVirtualRegister(SubRC);
4200
4201 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
4202 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
4203 .addReg(SuperReg.getReg(), 0, SubIdx);
4204 return SubReg;
4205 }
4206
4207 // Just in case the super register is itself a sub-register, copy it to a new
4208 // value so we don't need to worry about merging its subreg index with the
4209 // SubIdx passed to this function. The register coalescer should be able to
4210 // eliminate this extra copy.
4211 Register NewSuperReg = MRI.createVirtualRegister(SuperRC);
4212
4213 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
4214 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
4215
4216 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
4217 .addReg(NewSuperReg, 0, SubIdx);
4218
4219 return SubReg;
4220 }
4221
4222 MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
4223 MachineBasicBlock::iterator MII,
4224 MachineRegisterInfo &MRI,
4225 MachineOperand &Op,
4226 const TargetRegisterClass *SuperRC,
4227 unsigned SubIdx,
4228 const TargetRegisterClass *SubRC) const {
4229 if (Op.isImm()) {
4230 if (SubIdx == AMDGPU::sub0)
4231 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
4232 if (SubIdx == AMDGPU::sub1)
4233 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));
4234
4235 llvm_unreachable("Unhandled register index for immediate");
4236 }
4237
4238 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
4239 SubIdx, SubRC);
4240 return MachineOperand::CreateReg(SubReg, false);
4241 }
4242
4243 // Change the order of operands from (0, 1, 2) to (0, 2, 1)
4244 void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
4245 assert(Inst.getNumExplicitOperands() == 3);
4246 MachineOperand Op1 = Inst.getOperand(1);
4247 Inst.RemoveOperand(1);
4248 Inst.addOperand(Op1);
4249 }
4250
4251 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
4252 const MCOperandInfo &OpInfo,
4253 const MachineOperand &MO) const {
4254 if (!MO.isReg())
4255 return false;
4256
4257 Register Reg = MO.getReg();
4258 const TargetRegisterClass *RC = Register::isVirtualRegister(Reg)
4259 ? MRI.getRegClass(Reg)
4260 : RI.getPhysRegClass(Reg);
4261
4262 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass);
4263 if (MO.getSubReg()) {
4264 const MachineFunction *MF = MO.getParent()->getParent()->getParent();
4265 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF);
4266 if (!SuperRC)
4267 return false;
4268
4269 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg());
4270 if (!DRC)
4271 return false;
4272 }
4273 return RC->hasSuperClassEq(DRC);
4274 }
4275
4276 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
4277 const MCOperandInfo &OpInfo,
4278 const MachineOperand &MO) const {
4279 if (MO.isReg())
4280 return isLegalRegOperand(MRI, OpInfo, MO);
4281
4282 // Handle non-register types that are treated like immediates.
4283 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
4284 return true;
4285 }
4286
4287 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
4288 const MachineOperand *MO) const {
4289 const MachineFunction &MF = *MI.getParent()->getParent();
4290 const MachineRegisterInfo &MRI = MF.getRegInfo();
4291 const MCInstrDesc &InstDesc = MI.getDesc();
4292 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
4293 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
4294 const TargetRegisterClass *DefinedRC =
4295 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
4296 if (!MO)
4297 MO = &MI.getOperand(OpIdx);
4298
4299 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode());
4300 int VOP3LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
4301 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
4302 if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--)
4303 return false;
4304
4305 SmallDenseSet<RegSubRegPair> SGPRsUsed;
4306 if (MO->isReg())
4307 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg()));
4308
4309 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
4310 if (i == OpIdx)
4311 continue;
4312 const MachineOperand &Op = MI.getOperand(i);
4313 if (Op.isReg()) {
4314 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg());
4315 if (!SGPRsUsed.count(SGPR) &&
4316 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
4317 if (--ConstantBusLimit <= 0)
4318 return false;
4319 SGPRsUsed.insert(SGPR);
4320 }
4321 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
4322 if (--ConstantBusLimit <= 0)
4323 return false;
4324 } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) &&
4325 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) {
4326 if (!VOP3LiteralLimit--)
4327 return false;
4328 if (--ConstantBusLimit <= 0)
4329 return false;
4330 }
4331 }
4332 }
4333
4334 if (MO->isReg()) {
4335 assert(DefinedRC);
4336 return isLegalRegOperand(MRI, OpInfo, *MO);
4337 }
4338
4339 // Handle non-register types that are treated like immediates.
4340 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal());
4341
4342 if (!DefinedRC) {
4343 // This operand expects an immediate.
4344 return true;
4345 }
4346
4347 return isImmOperandLegal(MI, OpIdx, *MO);
4348 }
4349
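// VOP2 encodings are restrictive: broadly, src0 may be an SGPR, an inline
// constant or a literal, but src1 must be a VGPR (readlane/writelane are the
// notable exceptions handled below). Operands that violate this are either
// commuted into a legal position or moved with legalizeOpWithMove().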
4350 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
4351 MachineInstr &MI) const {
4352 unsigned Opc = MI.getOpcode();
4353 const MCInstrDesc &InstrDesc = get(Opc);
4354
4355 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
4356 MachineOperand &Src0 = MI.getOperand(Src0Idx);
4357
4358 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
4359 MachineOperand &Src1 = MI.getOperand(Src1Idx);
4360
4361 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32
4362 // we need to only have one constant bus use before GFX10.
4363 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
4364 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 &&
4365 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) ||
4366 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx])))
4367 legalizeOpWithMove(MI, Src0Idx);
4368
4369 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for
4370 // both the value to write (src0) and lane select (src1). Fix up non-SGPR
4371 // src0/src1 with V_READFIRSTLANE.
4372 if (Opc == AMDGPU::V_WRITELANE_B32) {
4373 const DebugLoc &DL = MI.getDebugLoc();
4374 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) {
4375 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4376 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4377 .add(Src0);
4378 Src0.ChangeToRegister(Reg, false);
4379 }
4380 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) {
4381 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4382 const DebugLoc &DL = MI.getDebugLoc();
4383 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4384 .add(Src1);
4385 Src1.ChangeToRegister(Reg, false);
4386 }
4387 return;
4388 }
4389
4390 // No VOP2 instructions support AGPRs.
4391 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg()))
4392 legalizeOpWithMove(MI, Src0Idx);
4393
4394 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg()))
4395 legalizeOpWithMove(MI, Src1Idx);
4396
4397 // VOP2 instructions accept all operand types for src0, so we don't need to
4398 // check its legality. If src1 is already legal, we don't need to do anything.
4399 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
4400 return;
4401
4402 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
4403 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane
4404 // select is uniform.
4405 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
4406 RI.isVGPR(MRI, Src1.getReg())) {
4407 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4408 const DebugLoc &DL = MI.getDebugLoc();
4409 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4410 .add(Src1);
4411 Src1.ChangeToRegister(Reg, false);
4412 return;
4413 }
4414
4415 // We do not use commuteInstruction here because it is too aggressive and will
4416 // commute if it is possible. We only want to commute here if it improves
4417 // legality. This can be called a fairly large number of times so don't waste
4418 // compile time pointlessly swapping and checking legality again.
4419 if (HasImplicitSGPR || !MI.isCommutable()) {
4420 legalizeOpWithMove(MI, Src1Idx);
4421 return;
4422 }
4423
4424 // If src0 can be used as src1, commuting will make the operands legal.
4425 // Otherwise we have to give up and insert a move.
4426 //
4427 // TODO: Other immediate-like operand kinds could be commuted if there was a
4428 // MachineOperand::ChangeTo* for them.
4429 if ((!Src1.isImm() && !Src1.isReg()) ||
4430 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
4431 legalizeOpWithMove(MI, Src1Idx);
4432 return;
4433 }
4434
4435 int CommutedOpc = commuteOpcode(MI);
4436 if (CommutedOpc == -1) {
4437 legalizeOpWithMove(MI, Src1Idx);
4438 return;
4439 }
4440
4441 MI.setDesc(get(CommutedOpc));
4442
4443 Register Src0Reg = Src0.getReg();
4444 unsigned Src0SubReg = Src0.getSubReg();
4445 bool Src0Kill = Src0.isKill();
4446
4447 if (Src1.isImm())
4448 Src0.ChangeToImmediate(Src1.getImm());
4449 else if (Src1.isReg()) {
4450 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
4451 Src0.setSubReg(Src1.getSubReg());
4452 } else
4453 llvm_unreachable("Should only have register or immediate operands");
4454
4455 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
4456 Src1.setSubReg(Src0SubReg);
4457 fixImplicitOperands(MI);
4458 }
4459
4460 // Legalize VOP3 operands. All operand types are supported for any operand
4461 // but only one literal constant and only starting from GFX10.
4462 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
4463 MachineInstr &MI) const {
4464 unsigned Opc = MI.getOpcode();
4465
4466 int VOP3Idx[3] = {
4467 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
4468 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
4469 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
4470 };
4471
4472 if (Opc == AMDGPU::V_PERMLANE16_B32 ||
4473 Opc == AMDGPU::V_PERMLANEX16_B32) {
4474 // src1 and src2 must be scalar
4475 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]);
4476 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]);
4477 const DebugLoc &DL = MI.getDebugLoc();
4478 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
4479 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4480 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4481 .add(Src1);
4482 Src1.ChangeToRegister(Reg, false);
4483 }
4484 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) {
4485 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4486 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4487 .add(Src2);
4488 Src2.ChangeToRegister(Reg, false);
4489 }
4490 }
4491
4492 // Find the one SGPR operand we are allowed to use.
4493 int ConstantBusLimit = ST.getConstantBusLimit(Opc);
4494 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
4495 SmallDenseSet<unsigned> SGPRsUsed;
4496 unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
4497 if (SGPRReg != AMDGPU::NoRegister) {
4498 SGPRsUsed.insert(SGPRReg);
4499 --ConstantBusLimit;
4500 }
4501
4502 for (unsigned i = 0; i < 3; ++i) {
4503 int Idx = VOP3Idx[i];
4504 if (Idx == -1)
4505 break;
4506 MachineOperand &MO = MI.getOperand(Idx);
4507
4508 if (!MO.isReg()) {
4509 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx]))
4510 continue;
4511
4512 if (LiteralLimit > 0 && ConstantBusLimit > 0) {
4513 --LiteralLimit;
4514 --ConstantBusLimit;
4515 continue;
4516 }
4517
4518 --LiteralLimit;
4519 --ConstantBusLimit;
4520 legalizeOpWithMove(MI, Idx);
4521 continue;
4522 }
4523
4524 if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) &&
4525 !isOperandLegal(MI, Idx, &MO)) {
4526 legalizeOpWithMove(MI, Idx);
4527 continue;
4528 }
4529
4530 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
4531 continue; // VGPRs are legal
4532
4533 // We can use one SGPR in each VOP3 instruction prior to GFX10
4534 // and two starting from GFX10.
4535 if (SGPRsUsed.count(MO.getReg()))
4536 continue;
4537 if (ConstantBusLimit > 0) {
4538 SGPRsUsed.insert(MO.getReg());
4539 --ConstantBusLimit;
4540 continue;
4541 }
4542
4543 // If we make it this far, then the operand is not legal and we must
4544 // legalize it.
4545 legalizeOpWithMove(MI, Idx);
4546 }
4547 }
4548
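// Copies a uniform value held in VGPRs into newly created SGPRs by emitting
// one V_READFIRSTLANE_B32 per 32-bit sub-register and, for multi-word values,
// reassembling the result with a REG_SEQUENCE.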
4549 Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
4550 MachineRegisterInfo &MRI) const {
4551 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
4552 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
4553 Register DstReg = MRI.createVirtualRegister(SRC);
4554 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
4555
4556 if (RI.hasAGPRs(VRC)) {
4557 VRC = RI.getEquivalentVGPRClass(VRC);
4558 Register NewSrcReg = MRI.createVirtualRegister(VRC);
4559 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4560 get(TargetOpcode::COPY), NewSrcReg)
4561 .addReg(SrcReg);
4562 SrcReg = NewSrcReg;
4563 }
4564
4565 if (SubRegs == 1) {
4566 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4567 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
4568 .addReg(SrcReg);
4569 return DstReg;
4570 }
4571
4572 SmallVector<unsigned, 8> SRegs;
4573 for (unsigned i = 0; i < SubRegs; ++i) {
4574 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4575 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4576 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
4577 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
4578 SRegs.push_back(SGPR);
4579 }
4580
4581 MachineInstrBuilder MIB =
4582 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4583 get(AMDGPU::REG_SEQUENCE), DstReg);
4584 for (unsigned i = 0; i < SubRegs; ++i) {
4585 MIB.addReg(SRegs[i]);
4586 MIB.addImm(RI.getSubRegFromChannel(i));
4587 }
4588 return DstReg;
4589 }
4590
4591 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
4592 MachineInstr &MI) const {
4593
4594 // If the pointer is stored in VGPRs, then we need to move it to
4595 // SGPRs using v_readfirstlane. This is safe because we only select
4596 // loads with uniform pointers to SMRD instructions, so we know the
4597 // pointer value is uniform.
4598 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
4599 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
4600 unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
4601 SBase->setReg(SGPR);
4602 }
4603 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
4604 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
4605 unsigned SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
4606 SOff->setReg(SGPR);
4607 }
4608 }
4609
4610 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
4611 MachineBasicBlock::iterator I,
4612 const TargetRegisterClass *DstRC,
4613 MachineOperand &Op,
4614 MachineRegisterInfo &MRI,
4615 const DebugLoc &DL) const {
4616 Register OpReg = Op.getReg();
4617 unsigned OpSubReg = Op.getSubReg();
4618
4619 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
4620 RI.getRegClassForReg(MRI, OpReg), OpSubReg);
4621
4622 // Check if operand is already the correct register class.
4623 if (DstRC == OpRC)
4624 return;
4625
4626 Register DstReg = MRI.createVirtualRegister(DstRC);
4627 MachineInstr *Copy =
4628 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);
4629
4630 Op.setReg(DstReg);
4631 Op.setSubReg(0);
4632
4633 MachineInstr *Def = MRI.getVRegDef(OpReg);
4634 if (!Def)
4635 return;
4636
4637 // Try to eliminate the copy if it is copying an immediate value.
4638 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass)
4639 FoldImmediate(*Copy, *Def, OpReg, &MRI);
4640
4641 bool ImpDef = Def->isImplicitDef();
4642 while (!ImpDef && Def && Def->isCopy()) {
4643 if (Def->getOperand(1).getReg().isPhysical())
4644 break;
4645 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg());
4646 ImpDef = Def && Def->isImplicitDef();
4647 }
4648 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
4649 !ImpDef)
4650 Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
4651 }
4652
// Emit the actual waterfall loop, executing the wrapped instruction for each
// unique value of \p Rsrc across all lanes. In the best case we execute 1
// iteration, in the worst case we execute once per lane (64 iterations for
// wave64, 32 for wave32).
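//
// The emitted loop has roughly this shape (wave64 shown; wave32 uses the
// 32-bit opcodes and EXEC_LO):
//
//   loop:
//     sRsrc = v_readfirstlane_b32 of each dword of vRsrc
//     cond  = (sRsrc[0:1] == vRsrc[0:1]) & (sRsrc[2:3] == vRsrc[2:3])
//     exec  = s_and_saveexec cond            ; original exec saved
//     ... wrapped instruction, now using sRsrc ...
//     exec  = s_xor_term exec, saved exec    ; retire the handled lanes
//     s_cbranch_execnz loop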
static void
emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
                          MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
                          const DebugLoc &DL, MachineOperand &Rsrc) {
4660 MachineFunction &MF = *OrigBB.getParent();
4661 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
4662 const SIRegisterInfo *TRI = ST.getRegisterInfo();
4663 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
4664 unsigned SaveExecOpc =
4665 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64;
4666 unsigned XorTermOpc =
4667 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
4668 unsigned AndOpc =
4669 ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
4670 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
4671
4672 MachineBasicBlock::iterator I = LoopBB.begin();
4673
4674 Register VRsrc = Rsrc.getReg();
4675 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());
4676
4677 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
4678 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
4679 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
4680 Register AndCond = MRI.createVirtualRegister(BoolXExecRC);
4681 Register SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4682 Register SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4683 Register SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4684 Register SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4685 Register SRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4686
4687 // Beginning of the loop, read the next Rsrc variant.
4688 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0)
4689 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub0);
4690 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub1)
4691 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub1);
4692 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub2)
4693 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub2);
4694 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub3)
4695 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub3);
4696
4697 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc)
4698 .addReg(SRsrcSub0)
4699 .addImm(AMDGPU::sub0)
4700 .addReg(SRsrcSub1)
4701 .addImm(AMDGPU::sub1)
4702 .addReg(SRsrcSub2)
4703 .addImm(AMDGPU::sub2)
4704 .addReg(SRsrcSub3)
4705 .addImm(AMDGPU::sub3);
4706
4707 // Update Rsrc operand to use the SGPR Rsrc.
4708 Rsrc.setReg(SRsrc);
4709 Rsrc.setIsKill(true);
4710
4711 // Identify all lanes with identical Rsrc operands in their VGPRs.
4712 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg0)
4713 .addReg(SRsrc, 0, AMDGPU::sub0_sub1)
4714 .addReg(VRsrc, 0, AMDGPU::sub0_sub1);
4715 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1)
4716 .addReg(SRsrc, 0, AMDGPU::sub2_sub3)
4717 .addReg(VRsrc, 0, AMDGPU::sub2_sub3);
4718 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndCond)
4719 .addReg(CondReg0)
4720 .addReg(CondReg1);
4721
4722 MRI.setSimpleHint(SaveExec, AndCond);
4723
4724 // Update EXEC to matching lanes, saving original to SaveExec.
4725 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec)
4726 .addReg(AndCond, RegState::Kill);
4727
4728 // The original instruction is here; we insert the terminators after it.
4729 I = LoopBB.end();
4730
4731 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
4732 BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec)
4733 .addReg(Exec)
4734 .addReg(SaveExec);
4735 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB);
4736 }
4737
4738 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register
4739 // with SGPRs by iterating over all unique values across all lanes.
static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
                              MachineOperand &Rsrc, MachineDominatorTree *MDT) {
4742 MachineBasicBlock &MBB = *MI.getParent();
4743 MachineFunction &MF = *MBB.getParent();
4744 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
4745 const SIRegisterInfo *TRI = ST.getRegisterInfo();
4746 MachineRegisterInfo &MRI = MF.getRegInfo();
4747 MachineBasicBlock::iterator I(&MI);
4748 const DebugLoc &DL = MI.getDebugLoc();
4749 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
4750 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
4751 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
4752
4753 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
4754
4755 // Save the EXEC mask
4756 BuildMI(MBB, I, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec);
4757
4758 // Killed uses in the instruction we are waterfalling around will be
4759 // incorrect due to the added control-flow.
4760 for (auto &MO : MI.uses()) {
4761 if (MO.isReg() && MO.isUse()) {
4762 MRI.clearKillFlags(MO.getReg());
4763 }
4764 }
4765
4766 // To insert the loop we need to split the block. Move everything after this
4767 // point to a new block, and insert a new empty block between the two.
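  //
  // After the split the CFG looks like:
  //
  //   MBB --> LoopBB --> RemainderBB --> (original successors of MBB)
  //             ^  |
  //             +--+   back-edge taken while any lanes remain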
4768 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
4769 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
4770 MachineFunction::iterator MBBI(MBB);
4771 ++MBBI;
4772
4773 MF.insert(MBBI, LoopBB);
4774 MF.insert(MBBI, RemainderBB);
4775
4776 LoopBB->addSuccessor(LoopBB);
4777 LoopBB->addSuccessor(RemainderBB);
4778
4779 // Move MI to the LoopBB, and the remainder of the block to RemainderBB.
4780 MachineBasicBlock::iterator J = I++;
4781 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
4782 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
4783 LoopBB->splice(LoopBB->begin(), &MBB, J);
4784
4785 MBB.addSuccessor(LoopBB);
4786
4787 // Update dominators. We know that MBB immediately dominates LoopBB, that
4788 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately
4789 // dominates all of the successors transferred to it from MBB that MBB used
4790 // to properly dominate.
4791 if (MDT) {
4792 MDT->addNewBlock(LoopBB, &MBB);
4793 MDT->addNewBlock(RemainderBB, LoopBB);
4794 for (auto &Succ : RemainderBB->successors()) {
4795 if (MDT->properlyDominates(&MBB, Succ)) {
4796 MDT->changeImmediateDominator(Succ, RemainderBB);
4797 }
4798 }
4799 }
4800
4801 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc);
4802
4803 // Restore the EXEC mask
4804 MachineBasicBlock::iterator First = RemainderBB->begin();
4805 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
4806 }
4807
4808 // Extract pointer from Rsrc and return a zero-value Rsrc replacement.
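// The base pointer lives in the first two dwords of the descriptor; callers
// move the extracted pointer into VADDR (adding it to an existing VADDR for
// _ADDR64 forms) and use the returned descriptor, which has a null base and
// only the default data format in its upper dwords.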
4809 static std::tuple<unsigned, unsigned>
extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
4811 MachineBasicBlock &MBB = *MI.getParent();
4812 MachineFunction &MF = *MBB.getParent();
4813 MachineRegisterInfo &MRI = MF.getRegInfo();
4814
4815 // Extract the ptr from the resource descriptor.
4816 unsigned RsrcPtr =
4817 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
4818 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
4819
4820 // Create an empty resource descriptor
4821 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4822 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4823 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4824 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4825 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
4826
4827 // Zero64 = 0
4828 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
4829 .addImm(0);
4830
4831 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
4832 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
4833 .addImm(RsrcDataFormat & 0xFFFFFFFF);
4834
4835 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
4836 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
4837 .addImm(RsrcDataFormat >> 32);
4838
4839 // NewSRsrc = {Zero64, SRsrcFormat}
4840 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
4841 .addReg(Zero64)
4842 .addImm(AMDGPU::sub0_sub1)
4843 .addReg(SRsrcFormatLo)
4844 .addImm(AMDGPU::sub2)
4845 .addReg(SRsrcFormatHi)
4846 .addImm(AMDGPU::sub3);
4847
4848 return std::make_tuple(RsrcPtr, NewSRsrc);
4849 }
4850
void SIInstrInfo::legalizeOperands(MachineInstr &MI,
                                   MachineDominatorTree *MDT) const {
4853 MachineFunction &MF = *MI.getParent()->getParent();
4854 MachineRegisterInfo &MRI = MF.getRegInfo();
4855
4856 // Legalize VOP2
4857 if (isVOP2(MI) || isVOPC(MI)) {
4858 legalizeOperandsVOP2(MRI, MI);
4859 return;
4860 }
4861
4862 // Legalize VOP3
4863 if (isVOP3(MI)) {
4864 legalizeOperandsVOP3(MRI, MI);
4865 return;
4866 }
4867
4868 // Legalize SMRD
4869 if (isSMRD(MI)) {
4870 legalizeOperandsSMRD(MRI, MI);
4871 return;
4872 }
4873
4874 // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
  // class of the output.
4877 if (MI.getOpcode() == AMDGPU::PHI) {
4878 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
4879 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
4880 if (!MI.getOperand(i).isReg() ||
4881 !Register::isVirtualRegister(MI.getOperand(i).getReg()))
4882 continue;
4883 const TargetRegisterClass *OpRC =
4884 MRI.getRegClass(MI.getOperand(i).getReg());
4885 if (RI.hasVectorRegisters(OpRC)) {
4886 VRC = OpRC;
4887 } else {
4888 SRC = OpRC;
4889 }
4890 }
4891
    // If any of the operands are VGPR registers, then they all must be
    // VGPRs; otherwise we will create illegal VGPR->SGPR copies when
    // legalizing them.
4895 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
4896 if (!VRC) {
4897 assert(SRC);
4898 if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
4899 VRC = &AMDGPU::VReg_1RegClass;
4900 } else
4901 VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
4902 ? RI.getEquivalentAGPRClass(SRC)
4903 : RI.getEquivalentVGPRClass(SRC);
4904 } else {
4905 VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
4906 ? RI.getEquivalentAGPRClass(VRC)
4907 : RI.getEquivalentVGPRClass(VRC);
4908 }
4909 RC = VRC;
4910 } else {
4911 RC = SRC;
4912 }
4913
4914 // Update all the operands so they have the same type.
4915 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
4916 MachineOperand &Op = MI.getOperand(I);
4917 if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
4918 continue;
4919
4920 // MI is a PHI instruction.
4921 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
4922 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
4923
4924 // Avoid creating no-op copies with the same src and dst reg class. These
4925 // confuse some of the machine passes.
4926 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
4927 }
4928 }
4929
4930 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
4931 // VGPR dest type and SGPR sources, insert copies so all operands are
4932 // VGPRs. This seems to help operand folding / the register coalescer.
4933 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
4934 MachineBasicBlock *MBB = MI.getParent();
4935 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
4936 if (RI.hasVGPRs(DstRC)) {
4937 // Update all the operands so they are VGPR register classes. These may
4938 // not be the same register class because REG_SEQUENCE supports mixing
4939 // subregister index types e.g. sub0_sub1 + sub2 + sub3
4940 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
4941 MachineOperand &Op = MI.getOperand(I);
4942 if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
4943 continue;
4944
4945 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
4946 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
4947 if (VRC == OpRC)
4948 continue;
4949
4950 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
4951 Op.setIsKill();
4952 }
4953 }
4954
4955 return;
4956 }
4957
4958 // Legalize INSERT_SUBREG
4959 // src0 must have the same register class as dst
4960 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
4961 Register Dst = MI.getOperand(0).getReg();
4962 Register Src0 = MI.getOperand(1).getReg();
4963 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
4964 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
4965 if (DstRC != Src0RC) {
4966 MachineBasicBlock *MBB = MI.getParent();
4967 MachineOperand &Op = MI.getOperand(1);
4968 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
4969 }
4970 return;
4971 }
4972
4973 // Legalize SI_INIT_M0
4974 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
4975 MachineOperand &Src = MI.getOperand(0);
4976 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
4977 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
4978 return;
4979 }
4980
4981 // Legalize MIMG and MUBUF/MTBUF for shaders.
4982 //
4983 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
4984 // scratch memory access. In both cases, the legalization never involves
4985 // conversion to the addr64 form.
4986 if (isMIMG(MI) ||
4987 (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
4988 (isMUBUF(MI) || isMTBUF(MI)))) {
4989 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
4990 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
4991 unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
4992 SRsrc->setReg(SGPR);
4993 }
4994
4995 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
4996 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
4997 unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
4998 SSamp->setReg(SGPR);
4999 }
5000 return;
5001 }
5002
5003 // Legalize MUBUF* instructions.
5004 int RsrcIdx =
5005 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
5006 if (RsrcIdx != -1) {
5007 // We have an MUBUF instruction
5008 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
5009 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
5010 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
5011 RI.getRegClass(RsrcRC))) {
5012 // The operands are legal.
      // FIXME: We may need to legalize operands besides srsrc.
5014 return;
5015 }
5016
5017 // Legalize a VGPR Rsrc.
5018 //
5019 // If the instruction is _ADDR64, we can avoid a waterfall by extracting
5020 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
5021 // a zero-value SRsrc.
5022 //
5023 // If the instruction is _OFFSET (both idxen and offen disabled), and we
5024 // support ADDR64 instructions, we can convert to ADDR64 and do the same as
5025 // above.
5026 //
5027 // Otherwise we are on non-ADDR64 hardware, and/or we have
5028 // idxen/offen/bothen and we fall back to a waterfall loop.
5029
5030 MachineBasicBlock &MBB = *MI.getParent();
5031
5032 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
5033 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
5034 // This is already an ADDR64 instruction so we need to add the pointer
5035 // extracted from the resource descriptor to the current value of VAddr.
5036 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5037 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5038 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5039
5040 const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5041 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
5042 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
5043
5044 unsigned RsrcPtr, NewSRsrc;
5045 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5046
5047 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
5048 const DebugLoc &DL = MI.getDebugLoc();
5049 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e64), NewVAddrLo)
5050 .addDef(CondReg0)
5051 .addReg(RsrcPtr, 0, AMDGPU::sub0)
5052 .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
5053 .addImm(0);
5054
5055 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
5056 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
5057 .addDef(CondReg1, RegState::Dead)
5058 .addReg(RsrcPtr, 0, AMDGPU::sub1)
5059 .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
5060 .addReg(CondReg0, RegState::Kill)
5061 .addImm(0);
5062
5063 // NewVaddr = {NewVaddrHi, NewVaddrLo}
5064 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
5065 .addReg(NewVAddrLo)
5066 .addImm(AMDGPU::sub0)
5067 .addReg(NewVAddrHi)
5068 .addImm(AMDGPU::sub1);
5069
5070 VAddr->setReg(NewVAddr);
5071 Rsrc->setReg(NewSRsrc);
5072 } else if (!VAddr && ST.hasAddr64()) {
      // This instruction is the _OFFSET variant, so we need to convert it to
      // ADDR64.
5075 assert(MBB.getParent()->getSubtarget<GCNSubtarget>().getGeneration()
5076 < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
5077 "FIXME: Need to emit flat atomics here");
5078
5079 unsigned RsrcPtr, NewSRsrc;
5080 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5081
5082 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5083 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
5084 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
5085 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
5086 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
5087
      // Atomics with return have an additional tied operand and are
      // missing some of the special bits.
5090 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
5091 MachineInstr *Addr64;
5092
5093 if (!VDataIn) {
5094 // Regular buffer load / store.
5095 MachineInstrBuilder MIB =
5096 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
5097 .add(*VData)
5098 .addReg(NewVAddr)
5099 .addReg(NewSRsrc)
5100 .add(*SOffset)
5101 .add(*Offset);
5102
5103 // Atomics do not have this operand.
5104 if (const MachineOperand *GLC =
5105 getNamedOperand(MI, AMDGPU::OpName::glc)) {
5106 MIB.addImm(GLC->getImm());
5107 }
5108 if (const MachineOperand *DLC =
5109 getNamedOperand(MI, AMDGPU::OpName::dlc)) {
5110 MIB.addImm(DLC->getImm());
5111 }
5112
5113 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
5114
5115 if (const MachineOperand *TFE =
5116 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
5117 MIB.addImm(TFE->getImm());
5118 }
5119
5120 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));
5121
5122 MIB.cloneMemRefs(MI);
5123 Addr64 = MIB;
5124 } else {
5125 // Atomics with return.
5126 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
5127 .add(*VData)
5128 .add(*VDataIn)
5129 .addReg(NewVAddr)
5130 .addReg(NewSRsrc)
5131 .add(*SOffset)
5132 .add(*Offset)
5133 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
5134 .cloneMemRefs(MI);
5135 }
5136
5137 MI.removeFromParent();
5138
5139 // NewVaddr = {NewVaddrHi, NewVaddrLo}
5140 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
5141 NewVAddr)
5142 .addReg(RsrcPtr, 0, AMDGPU::sub0)
5143 .addImm(AMDGPU::sub0)
5144 .addReg(RsrcPtr, 0, AMDGPU::sub1)
5145 .addImm(AMDGPU::sub1);
5146 } else {
5147 // This is another variant; legalize Rsrc with waterfall loop from VGPRs
5148 // to SGPRs.
5149 loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT);
5150 }
5151 }
5152 }
5153
void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
                             MachineDominatorTree *MDT) const {
5156 SetVectorType Worklist;
5157 Worklist.insert(&TopInst);
5158
5159 while (!Worklist.empty()) {
5160 MachineInstr &Inst = *Worklist.pop_back_val();
5161 MachineBasicBlock *MBB = Inst.getParent();
5162 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
5163
5164 unsigned Opcode = Inst.getOpcode();
5165 unsigned NewOpcode = getVALUOp(Inst);
5166
5167 // Handle some special cases
5168 switch (Opcode) {
5169 default:
5170 break;
5171 case AMDGPU::S_ADD_U64_PSEUDO:
5172 case AMDGPU::S_SUB_U64_PSEUDO:
5173 splitScalar64BitAddSub(Worklist, Inst, MDT);
5174 Inst.eraseFromParent();
5175 continue;
5176 case AMDGPU::S_ADD_I32:
5177 case AMDGPU::S_SUB_I32:
5178 // FIXME: The u32 versions currently selected use the carry.
5179 if (moveScalarAddSub(Worklist, Inst, MDT))
5180 continue;
5181
5182 // Default handling
5183 break;
5184 case AMDGPU::S_AND_B64:
5185 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
5186 Inst.eraseFromParent();
5187 continue;
5188
5189 case AMDGPU::S_OR_B64:
5190 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT);
5191 Inst.eraseFromParent();
5192 continue;
5193
5194 case AMDGPU::S_XOR_B64:
5195 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT);
5196 Inst.eraseFromParent();
5197 continue;
5198
5199 case AMDGPU::S_NAND_B64:
5200 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT);
5201 Inst.eraseFromParent();
5202 continue;
5203
5204 case AMDGPU::S_NOR_B64:
5205 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT);
5206 Inst.eraseFromParent();
5207 continue;
5208
5209 case AMDGPU::S_XNOR_B64:
5210 if (ST.hasDLInsts())
5211 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
5212 else
5213 splitScalar64BitXnor(Worklist, Inst, MDT);
5214 Inst.eraseFromParent();
5215 continue;
5216
5217 case AMDGPU::S_ANDN2_B64:
5218 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT);
5219 Inst.eraseFromParent();
5220 continue;
5221
5222 case AMDGPU::S_ORN2_B64:
5223 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT);
5224 Inst.eraseFromParent();
5225 continue;
5226
5227 case AMDGPU::S_NOT_B64:
5228 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
5229 Inst.eraseFromParent();
5230 continue;
5231
5232 case AMDGPU::S_BCNT1_I32_B64:
5233 splitScalar64BitBCNT(Worklist, Inst);
5234 Inst.eraseFromParent();
5235 continue;
5236
5237 case AMDGPU::S_BFE_I64:
5238 splitScalar64BitBFE(Worklist, Inst);
5239 Inst.eraseFromParent();
5240 continue;
5241
5242 case AMDGPU::S_LSHL_B32:
5243 if (ST.hasOnlyRevVALUShifts()) {
5244 NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
5245 swapOperands(Inst);
5246 }
5247 break;
5248 case AMDGPU::S_ASHR_I32:
5249 if (ST.hasOnlyRevVALUShifts()) {
5250 NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
5251 swapOperands(Inst);
5252 }
5253 break;
5254 case AMDGPU::S_LSHR_B32:
5255 if (ST.hasOnlyRevVALUShifts()) {
5256 NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
5257 swapOperands(Inst);
5258 }
5259 break;
5260 case AMDGPU::S_LSHL_B64:
5261 if (ST.hasOnlyRevVALUShifts()) {
5262 NewOpcode = AMDGPU::V_LSHLREV_B64;
5263 swapOperands(Inst);
5264 }
5265 break;
5266 case AMDGPU::S_ASHR_I64:
5267 if (ST.hasOnlyRevVALUShifts()) {
5268 NewOpcode = AMDGPU::V_ASHRREV_I64;
5269 swapOperands(Inst);
5270 }
5271 break;
5272 case AMDGPU::S_LSHR_B64:
5273 if (ST.hasOnlyRevVALUShifts()) {
5274 NewOpcode = AMDGPU::V_LSHRREV_B64;
5275 swapOperands(Inst);
5276 }
5277 break;
5278
5279 case AMDGPU::S_ABS_I32:
5280 lowerScalarAbs(Worklist, Inst);
5281 Inst.eraseFromParent();
5282 continue;
5283
5284 case AMDGPU::S_CBRANCH_SCC0:
5285 case AMDGPU::S_CBRANCH_SCC1:
5286 // Clear unused bits of vcc
5287 if (ST.isWave32())
5288 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32),
5289 AMDGPU::VCC_LO)
5290 .addReg(AMDGPU::EXEC_LO)
5291 .addReg(AMDGPU::VCC_LO);
5292 else
5293 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
5294 AMDGPU::VCC)
5295 .addReg(AMDGPU::EXEC)
5296 .addReg(AMDGPU::VCC);
5297 break;
5298
5299 case AMDGPU::S_BFE_U64:
5300 case AMDGPU::S_BFM_B64:
5301 llvm_unreachable("Moving this op to VALU not implemented");
5302
5303 case AMDGPU::S_PACK_LL_B32_B16:
5304 case AMDGPU::S_PACK_LH_B32_B16:
5305 case AMDGPU::S_PACK_HH_B32_B16:
5306 movePackToVALU(Worklist, MRI, Inst);
5307 Inst.eraseFromParent();
5308 continue;
5309
5310 case AMDGPU::S_XNOR_B32:
5311 lowerScalarXnor(Worklist, Inst);
5312 Inst.eraseFromParent();
5313 continue;
5314
5315 case AMDGPU::S_NAND_B32:
5316 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
5317 Inst.eraseFromParent();
5318 continue;
5319
5320 case AMDGPU::S_NOR_B32:
5321 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32);
5322 Inst.eraseFromParent();
5323 continue;
5324
5325 case AMDGPU::S_ANDN2_B32:
5326 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32);
5327 Inst.eraseFromParent();
5328 continue;
5329
5330 case AMDGPU::S_ORN2_B32:
5331 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32);
5332 Inst.eraseFromParent();
5333 continue;
5334
5335 // TODO: remove as soon as everything is ready
5336 // to replace VGPR to SGPR copy with V_READFIRSTLANEs.
5337 // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO
5338 // can only be selected from the uniform SDNode.
5339 case AMDGPU::S_ADD_CO_PSEUDO:
5340 case AMDGPU::S_SUB_CO_PSEUDO: {
5341 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
5342 ? AMDGPU::V_ADDC_U32_e64
5343 : AMDGPU::V_SUBB_U32_e64;
5344 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5345
5346 Register CarryInReg = Inst.getOperand(4).getReg();
5347 if (!MRI.constrainRegClass(CarryInReg, CarryRC)) {
5348 Register NewCarryReg = MRI.createVirtualRegister(CarryRC);
5349 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg)
5350 .addReg(CarryInReg);
5351 }
5352
5353 Register CarryOutReg = Inst.getOperand(1).getReg();
5354
5355 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass(
5356 MRI.getRegClass(Inst.getOperand(0).getReg())));
5357 MachineInstr *CarryOp =
5358 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg)
5359 .addReg(CarryOutReg, RegState::Define)
5360 .add(Inst.getOperand(2))
5361 .add(Inst.getOperand(3))
5362 .addReg(CarryInReg)
5363 .addImm(0);
5364 legalizeOperands(*CarryOp);
5365 MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg);
5366 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
5367 Inst.eraseFromParent();
5368 }
5369 continue;
5370 case AMDGPU::S_UADDO_PSEUDO:
5371 case AMDGPU::S_USUBO_PSEUDO: {
5372 const DebugLoc &DL = Inst.getDebugLoc();
5373 MachineOperand &Dest0 = Inst.getOperand(0);
5374 MachineOperand &Dest1 = Inst.getOperand(1);
5375 MachineOperand &Src0 = Inst.getOperand(2);
5376 MachineOperand &Src1 = Inst.getOperand(3);
5377
5378 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO)
5379 ? AMDGPU::V_ADD_I32_e64
5380 : AMDGPU::V_SUB_I32_e64;
5381 const TargetRegisterClass *NewRC =
5382 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg()));
5383 Register DestReg = MRI.createVirtualRegister(NewRC);
5384 MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg)
5385 .addReg(Dest1.getReg(), RegState::Define)
5386 .add(Src0)
5387 .add(Src1)
5388 .addImm(0); // clamp bit
5389
5390 legalizeOperands(*NewInstr, MDT);
5391
5392 MRI.replaceRegWith(Dest0.getReg(), DestReg);
5393 addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI,
5394 Worklist);
5395 Inst.eraseFromParent();
5396 }
5397 continue;
5398
5399 case AMDGPU::S_CSELECT_B32:
5400 case AMDGPU::S_CSELECT_B64:
5401 lowerSelect(Worklist, Inst, MDT);
5402 Inst.eraseFromParent();
5403 continue;
5404 }
5405
5406 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
5407 // We cannot move this instruction to the VALU, so we should try to
5408 // legalize its operands instead.
5409 legalizeOperands(Inst, MDT);
5410 continue;
5411 }
5412
5413 // Use the new VALU Opcode.
5414 const MCInstrDesc &NewDesc = get(NewOpcode);
5415 Inst.setDesc(NewDesc);
5416
    // Remove any references to SCC. Vector instructions can't read from it,
    // and we're just about to add the implicit use / defs of VCC, and we
    // don't want both.
5420 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
5421 MachineOperand &Op = Inst.getOperand(i);
5422 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
5423 // Only propagate through live-def of SCC.
5424 if (Op.isDef() && !Op.isDead())
5425 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
5426 Inst.RemoveOperand(i);
5427 }
5428 }
5429
5430 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
5431 // We are converting these to a BFE, so we need to add the missing
5432 // operands for the size and offset.
5433 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
5434 Inst.addOperand(MachineOperand::CreateImm(0));
5435 Inst.addOperand(MachineOperand::CreateImm(Size));
5436
5437 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
5438 // The VALU version adds the second operand to the result, so insert an
5439 // extra 0 operand.
5440 Inst.addOperand(MachineOperand::CreateImm(0));
5441 }
5442
5443 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
5444 fixImplicitOperands(Inst);
5445
5446 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
5447 const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
5448 // If we need to move this to VGPRs, we need to unpack the second operand
5449 // back into the 2 separate ones for bit offset and width.
5450 assert(OffsetWidthOp.isImm() &&
5451 "Scalar BFE is only implemented for constant width and offset");
5452 uint32_t Imm = OffsetWidthOp.getImm();
5453
5454 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
5455 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
5456 Inst.RemoveOperand(2); // Remove old immediate.
5457 Inst.addOperand(MachineOperand::CreateImm(Offset));
5458 Inst.addOperand(MachineOperand::CreateImm(BitWidth));
5459 }
5460
5461 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
5462 unsigned NewDstReg = AMDGPU::NoRegister;
5463 if (HasDst) {
5464 Register DstReg = Inst.getOperand(0).getReg();
5465 if (Register::isPhysicalRegister(DstReg))
5466 continue;
5467
5468 // Update the destination register class.
5469 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
5470 if (!NewDstRC)
5471 continue;
5472
5473 if (Inst.isCopy() &&
5474 Register::isVirtualRegister(Inst.getOperand(1).getReg()) &&
5475 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
        // Instead of creating a copy where src and dst are the same register
        // class, we just replace all uses of dst with src. These kinds of
        // copies interfere with the heuristics MachineSink uses to decide
        // whether or not to split a critical edge, since the pass assumes
        // that copies will end up as machine instructions and not be
        // eliminated.
5482 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
5483 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
5484 MRI.clearKillFlags(Inst.getOperand(1).getReg());
5485 Inst.getOperand(0).setReg(DstReg);
5486
5487 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally
5488 // these are deleted later, but at -O0 it would leave a suspicious
5489 // looking illegal copy of an undef register.
5490 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I)
5491 Inst.RemoveOperand(I);
5492 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF));
5493 continue;
5494 }
5495
5496 NewDstReg = MRI.createVirtualRegister(NewDstRC);
5497 MRI.replaceRegWith(DstReg, NewDstReg);
5498 }
5499
5500 // Legalize the operands
5501 legalizeOperands(Inst, MDT);
5502
5503 if (HasDst)
5504 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
5505 }
5506 }
5507
5508 // Add/sub require special handling to deal with carry outs.
bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
                                   MachineDominatorTree *MDT) const {
5511 if (ST.hasAddNoCarry()) {
5512 // Assume there is no user of scc since we don't select this in that case.
5513 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant
5514 // is used.
5515
5516 MachineBasicBlock &MBB = *Inst.getParent();
5517 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5518
5519 Register OldDstReg = Inst.getOperand(0).getReg();
5520 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5521
5522 unsigned Opc = Inst.getOpcode();
5523 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);
5524
5525 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ?
5526 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;
5527
5528 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC);
5529 Inst.RemoveOperand(3);
5530
5531 Inst.setDesc(get(NewOpc));
5532 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit
5533 Inst.addImplicitDefUseOperands(*MBB.getParent());
5534 MRI.replaceRegWith(OldDstReg, ResultReg);
5535 legalizeOperands(Inst, MDT);
5536
5537 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5538 return true;
5539 }
5540
5541 return false;
5542 }
5543
void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
                              MachineDominatorTree *MDT) const {
5546
5547 MachineBasicBlock &MBB = *Inst.getParent();
5548 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5549 MachineBasicBlock::iterator MII = Inst;
5550 DebugLoc DL = Inst.getDebugLoc();
5551
5552 MachineOperand &Dest = Inst.getOperand(0);
5553 MachineOperand &Src0 = Inst.getOperand(1);
5554 MachineOperand &Src1 = Inst.getOperand(2);
5555 MachineOperand &Cond = Inst.getOperand(3);
5556
5557 Register SCCSource = Cond.getReg();
5558 // Find SCC def, and if that is a copy (SCC = COPY reg) then use reg instead.
5559 if (!Cond.isUndef()) {
5560 for (MachineInstr &CandI :
5561 make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)),
5562 Inst.getParent()->rend())) {
5563 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) !=
5564 -1) {
5565 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) {
5566 SCCSource = CandI.getOperand(1).getReg();
5567 }
5568 break;
5569 }
5570 }
5571 }
5572
  // If this is a trivial select where the condition is effectively not SCC
  // (SCCSource is a source of a copy to SCC), then the select is semantically
  // equivalent to copying SCCSource. Hence, there is no need to create a
  // V_CNDMASK; we can just use SCCSource directly and bail out.
5577 if ((SCCSource != AMDGPU::SCC) && Src0.isImm() && (Src0.getImm() == -1) &&
5578 Src1.isImm() && (Src1.getImm() == 0)) {
5579 MRI.replaceRegWith(Dest.getReg(), SCCSource);
5580 return;
5581 }
5582
5583 const TargetRegisterClass *TC = ST.getWavefrontSize() == 64
5584 ? &AMDGPU::SReg_64_XEXECRegClass
5585 : &AMDGPU::SReg_32_XM0_XEXECRegClass;
5586 Register CopySCC = MRI.createVirtualRegister(TC);
5587
5588 if (SCCSource == AMDGPU::SCC) {
5589 // Insert a trivial select instead of creating a copy, because a copy from
5590 // SCC would semantically mean just copying a single bit, but we may need
5591 // the result to be a vector condition mask that needs preserving.
5592 unsigned Opcode = (ST.getWavefrontSize() == 64) ? AMDGPU::S_CSELECT_B64
5593 : AMDGPU::S_CSELECT_B32;
5594 auto NewSelect =
5595 BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0);
5596 NewSelect->getOperand(3).setIsUndef(Cond.isUndef());
5597 } else {
5598 BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC).addReg(SCCSource);
5599 }
5600
5601 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5602
5603 auto UpdatedInst =
5604 BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg)
5605 .addImm(0)
5606 .add(Src1) // False
5607 .addImm(0)
5608 .add(Src0) // True
5609 .addReg(CopySCC);
5610
5611 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5612 legalizeOperands(*UpdatedInst, MDT);
5613 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5614 }
5615
void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
                                 MachineInstr &Inst) const {
5618 MachineBasicBlock &MBB = *Inst.getParent();
5619 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5620 MachineBasicBlock::iterator MII = Inst;
5621 DebugLoc DL = Inst.getDebugLoc();
5622
5623 MachineOperand &Dest = Inst.getOperand(0);
5624 MachineOperand &Src = Inst.getOperand(1);
5625 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5626 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5627
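  // Expand abs(x) as max(x, 0 - x): negate the source into TmpReg, then
  // V_MAX_I32 picks the non-negative value.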
5628 unsigned SubOp = ST.hasAddNoCarry() ?
5629 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_I32_e32;
5630
5631 BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
5632 .addImm(0)
5633 .addReg(Src.getReg());
5634
5635 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
5636 .addReg(Src.getReg())
5637 .addReg(TmpReg);
5638
5639 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5640 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5641 }
5642
void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
                                  MachineInstr &Inst) const {
5645 MachineBasicBlock &MBB = *Inst.getParent();
5646 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5647 MachineBasicBlock::iterator MII = Inst;
5648 const DebugLoc &DL = Inst.getDebugLoc();
5649
5650 MachineOperand &Dest = Inst.getOperand(0);
5651 MachineOperand &Src0 = Inst.getOperand(1);
5652 MachineOperand &Src1 = Inst.getOperand(2);
5653
5654 if (ST.hasDLInsts()) {
5655 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5656 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
5657 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);
5658
5659 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
5660 .add(Src0)
5661 .add(Src1);
5662
5663 MRI.replaceRegWith(Dest.getReg(), NewDest);
5664 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
5665 } else {
    // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
    // invert either source and then perform the XOR. If either source is a
    // scalar register, then we can leave the inversion on the scalar unit to
    // achieve a better distribution of scalar and vector instructions.
5670 bool Src0IsSGPR = Src0.isReg() &&
5671 RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
5672 bool Src1IsSGPR = Src1.isReg() &&
5673 RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
5674 MachineInstr *Xor;
5675 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
5676 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
5677
5678 // Build a pair of scalar instructions and add them to the work list.
5679 // The next iteration over the work list will lower these to the vector
5680 // unit as necessary.
5681 if (Src0IsSGPR) {
5682 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0);
5683 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
5684 .addReg(Temp)
5685 .add(Src1);
5686 } else if (Src1IsSGPR) {
5687 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1);
5688 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
5689 .add(Src0)
5690 .addReg(Temp);
5691 } else {
5692 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp)
5693 .add(Src0)
5694 .add(Src1);
5695 MachineInstr *Not =
5696 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp);
5697 Worklist.insert(Not);
5698 }
5699
5700 MRI.replaceRegWith(Dest.getReg(), NewDest);
5701
5702 Worklist.insert(Xor);
5703
5704 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
5705 }
5706 }
5707
void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist,
                                      MachineInstr &Inst,
                                      unsigned Opcode) const {
5711 MachineBasicBlock &MBB = *Inst.getParent();
5712 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5713 MachineBasicBlock::iterator MII = Inst;
5714 const DebugLoc &DL = Inst.getDebugLoc();
5715
5716 MachineOperand &Dest = Inst.getOperand(0);
5717 MachineOperand &Src0 = Inst.getOperand(1);
5718 MachineOperand &Src1 = Inst.getOperand(2);
5719
5720 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
5721 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
5722
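  // Expand as NOT (Src0 <op> Src1): the binary op writes Interm and S_NOT_B32
  // inverts it into the final result. Both new instructions go back on the
  // worklist so they get vectorized if their operands require it.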
5723 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm)
5724 .add(Src0)
5725 .add(Src1);
5726
5727 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest)
5728 .addReg(Interm);
5729
5730 Worklist.insert(&Op);
5731 Worklist.insert(&Not);
5732
5733 MRI.replaceRegWith(Dest.getReg(), NewDest);
5734 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
5735 }
5736
void SIInstrInfo::splitScalarBinOpN2(SetVectorType &Worklist,
                                     MachineInstr &Inst,
                                     unsigned Opcode) const {
5740 MachineBasicBlock &MBB = *Inst.getParent();
5741 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5742 MachineBasicBlock::iterator MII = Inst;
5743 const DebugLoc &DL = Inst.getDebugLoc();
5744
5745 MachineOperand &Dest = Inst.getOperand(0);
5746 MachineOperand &Src0 = Inst.getOperand(1);
5747 MachineOperand &Src1 = Inst.getOperand(2);
5748
5749 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
5750 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
5751
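  // S_ANDN2/S_ORN2 compute Src0 <op> ~Src1, so invert Src1 into Interm first
  // and then apply the plain AND/OR.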
5752 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm)
5753 .add(Src1);
5754
5755 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest)
5756 .add(Src0)
5757 .addReg(Interm);
5758
5759 Worklist.insert(&Not);
5760 Worklist.insert(&Op);
5761
5762 MRI.replaceRegWith(Dest.getReg(), NewDest);
5763 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
5764 }
5765
void SIInstrInfo::splitScalar64BitUnaryOp(SetVectorType &Worklist,
                                          MachineInstr &Inst,
                                          unsigned Opcode) const {
5769 MachineBasicBlock &MBB = *Inst.getParent();
5770 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5771
5772 MachineOperand &Dest = Inst.getOperand(0);
5773 MachineOperand &Src0 = Inst.getOperand(1);
5774 DebugLoc DL = Inst.getDebugLoc();
5775
5776 MachineBasicBlock::iterator MII = Inst;
5777
5778 const MCInstrDesc &InstDesc = get(Opcode);
5779 const TargetRegisterClass *Src0RC = Src0.isReg() ?
5780 MRI.getRegClass(Src0.getReg()) :
5781 &AMDGPU::SGPR_32RegClass;
5782
5783 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
5784
5785 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5786 AMDGPU::sub0, Src0SubRC);
5787
5788 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
5789 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
5790 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
5791
5792 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
5793 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);
5794
5795 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5796 AMDGPU::sub1, Src0SubRC);
5797
5798 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
5799 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);
5800
5801 Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
5802 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
5803 .addReg(DestSub0)
5804 .addImm(AMDGPU::sub0)
5805 .addReg(DestSub1)
5806 .addImm(AMDGPU::sub1);
5807
5808 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
5809
5810 Worklist.insert(&LoHalf);
5811 Worklist.insert(&HiHalf);
5812
5813 // We don't need to legalizeOperands here because for a single operand, src0
5814 // will support any kind of input.
5815
5816 // Move all users of this moved value.
5817 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
5818 }
5819
void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist,
                                         MachineInstr &Inst,
                                         MachineDominatorTree *MDT) const {
5823 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
5824
5825 MachineBasicBlock &MBB = *Inst.getParent();
5826 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5827 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5828
5829 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5830 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5831 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5832
5833 Register CarryReg = MRI.createVirtualRegister(CarryRC);
5834 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC);
5835
5836 MachineOperand &Dest = Inst.getOperand(0);
5837 MachineOperand &Src0 = Inst.getOperand(1);
5838 MachineOperand &Src1 = Inst.getOperand(2);
5839 const DebugLoc &DL = Inst.getDebugLoc();
5840 MachineBasicBlock::iterator MII = Inst;
5841
5842 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
5843 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
5844 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
5845 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
5846
5847 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5848 AMDGPU::sub0, Src0SubRC);
5849 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5850 AMDGPU::sub0, Src1SubRC);
5851
5852
5853 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5854 AMDGPU::sub1, Src0SubRC);
5855 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5856 AMDGPU::sub1, Src1SubRC);
5857
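  // Add/sub the low halves first, producing a carry, then feed that carry
  // into the high-half add/sub.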
5858 unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
5859 MachineInstr *LoHalf =
5860 BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
5861 .addReg(CarryReg, RegState::Define)
5862 .add(SrcReg0Sub0)
5863 .add(SrcReg1Sub0)
5864 .addImm(0); // clamp bit
5865
5866 unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
5867 MachineInstr *HiHalf =
5868 BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
5869 .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
5870 .add(SrcReg0Sub1)
5871 .add(SrcReg1Sub1)
5872 .addReg(CarryReg, RegState::Kill)
5873 .addImm(0); // clamp bit
5874
5875 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
5876 .addReg(DestSub0)
5877 .addImm(AMDGPU::sub0)
5878 .addReg(DestSub1)
5879 .addImm(AMDGPU::sub1);
5880
5881 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
5882
5883 // Try to legalize the operands in case we need to swap the order to keep it
5884 // valid.
5885 legalizeOperands(*LoHalf, MDT);
5886 legalizeOperands(*HiHalf, MDT);
5887
  // Move all users of this moved value.
5889 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
5890 }
5891
void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
                                           MachineInstr &Inst, unsigned Opcode,
                                           MachineDominatorTree *MDT) const {
5895 MachineBasicBlock &MBB = *Inst.getParent();
5896 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5897
5898 MachineOperand &Dest = Inst.getOperand(0);
5899 MachineOperand &Src0 = Inst.getOperand(1);
5900 MachineOperand &Src1 = Inst.getOperand(2);
5901 DebugLoc DL = Inst.getDebugLoc();
5902
5903 MachineBasicBlock::iterator MII = Inst;
5904
5905 const MCInstrDesc &InstDesc = get(Opcode);
5906 const TargetRegisterClass *Src0RC = Src0.isReg() ?
5907 MRI.getRegClass(Src0.getReg()) :
5908 &AMDGPU::SGPR_32RegClass;
5909
5910 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
5911 const TargetRegisterClass *Src1RC = Src1.isReg() ?
5912 MRI.getRegClass(Src1.getReg()) :
5913 &AMDGPU::SGPR_32RegClass;
5914
5915 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
5916
5917 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5918 AMDGPU::sub0, Src0SubRC);
5919 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5920 AMDGPU::sub0, Src1SubRC);
5921 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5922 AMDGPU::sub1, Src0SubRC);
5923 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5924 AMDGPU::sub1, Src1SubRC);
5925
5926 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
5927 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
5928 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
5929
5930 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
5931 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
5932 .add(SrcReg0Sub0)
5933 .add(SrcReg1Sub0);
5934
5935 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
5936 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
5937 .add(SrcReg0Sub1)
5938 .add(SrcReg1Sub1);
5939
5940 Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
5941 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
5942 .addReg(DestSub0)
5943 .addImm(AMDGPU::sub0)
5944 .addReg(DestSub1)
5945 .addImm(AMDGPU::sub1);
5946
5947 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
5948
5949 Worklist.insert(&LoHalf);
5950 Worklist.insert(&HiHalf);
5951
  // Move all users of this moved value.
5953 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
5954 }
5955
void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
                                       MachineInstr &Inst,
                                       MachineDominatorTree *MDT) const {
5959 MachineBasicBlock &MBB = *Inst.getParent();
5960 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5961
5962 MachineOperand &Dest = Inst.getOperand(0);
5963 MachineOperand &Src0 = Inst.getOperand(1);
5964 MachineOperand &Src1 = Inst.getOperand(2);
5965 const DebugLoc &DL = Inst.getDebugLoc();
5966
5967 MachineBasicBlock::iterator MII = Inst;
5968
5969 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
5970
5971 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
5972
5973 MachineOperand* Op0;
5974 MachineOperand* Op1;
5975
5976 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
5977 Op0 = &Src0;
5978 Op1 = &Src1;
5979 } else {
5980 Op0 = &Src1;
5981 Op1 = &Src0;
5982 }
5983
5984 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
5985 .add(*Op0);
5986
5987 Register NewDest = MRI.createVirtualRegister(DestRC);
5988
5989 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
5990 .addReg(Interm)
5991 .add(*Op1);
5992
5993 MRI.replaceRegWith(Dest.getReg(), NewDest);
5994
5995 Worklist.insert(&Xor);
5996 }
5997
void SIInstrInfo::splitScalar64BitBCNT(SetVectorType &Worklist,
                                       MachineInstr &Inst) const {
6000 MachineBasicBlock &MBB = *Inst.getParent();
6001 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6002
6003 MachineBasicBlock::iterator MII = Inst;
6004 const DebugLoc &DL = Inst.getDebugLoc();
6005
6006 MachineOperand &Dest = Inst.getOperand(0);
6007 MachineOperand &Src = Inst.getOperand(1);
6008
6009 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
6010 const TargetRegisterClass *SrcRC = Src.isReg() ?
6011 MRI.getRegClass(Src.getReg()) :
6012 &AMDGPU::SGPR_32RegClass;
6013
6014 Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6015 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6016
6017 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
6018
6019 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
6020 AMDGPU::sub0, SrcSubRC);
6021 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
6022 AMDGPU::sub1, SrcSubRC);
6023
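  // V_BCNT_U32_B32 adds its second operand to the popcount, so chaining two
  // of them accumulates popcount(lo) + popcount(hi) into ResultReg.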
6024 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
6025
6026 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
6027
6028 MRI.replaceRegWith(Dest.getReg(), ResultReg);
6029
  // We don't need to legalize operands here. src0 for either instruction can
  // be an SGPR, and the second input is unused or determined here.
6032 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6033 }
6034
void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
                                      MachineInstr &Inst) const {
6037 MachineBasicBlock &MBB = *Inst.getParent();
6038 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6039 MachineBasicBlock::iterator MII = Inst;
6040 const DebugLoc &DL = Inst.getDebugLoc();
6041
6042 MachineOperand &Dest = Inst.getOperand(0);
6043 uint32_t Imm = Inst.getOperand(2).getImm();
6044 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
6045 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
6046
6047 (void) Offset;
6048
6049 // Only sext_inreg cases handled.
6050 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
6051 Offset == 0 && "Not implemented");
6052
6053 if (BitWidth < 32) {
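    // Sign-extend the low field with V_BFE_I32, then broadcast its sign bit
    // into the high dword with an arithmetic shift right by 31.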
6054 Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6055 Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6056 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
6057
6058 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
6059 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
6060 .addImm(0)
6061 .addImm(BitWidth);
6062
6063 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
6064 .addImm(31)
6065 .addReg(MidRegLo);
6066
6067 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
6068 .addReg(MidRegLo)
6069 .addImm(AMDGPU::sub0)
6070 .addReg(MidRegHi)
6071 .addImm(AMDGPU::sub1);
6072
6073 MRI.replaceRegWith(Dest.getReg(), ResultReg);
6074 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6075 return;
6076 }
6077
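  // BitWidth == 32: the low dword of the result is just the source's low
  // dword, and the high dword is its sign, again via a 31-bit arithmetic
  // shift.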
6078 MachineOperand &Src = Inst.getOperand(1);
6079 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6080 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
6081
6082 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
6083 .addImm(31)
6084 .addReg(Src.getReg(), 0, AMDGPU::sub0);
6085
6086 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
6087 .addReg(Src.getReg(), 0, AMDGPU::sub0)
6088 .addImm(AMDGPU::sub0)
6089 .addReg(TmpReg)
6090 .addImm(AMDGPU::sub1);
6091
6092 MRI.replaceRegWith(Dest.getReg(), ResultReg);
6093 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6094 }
6095
void SIInstrInfo::addUsersToMoveToVALUWorklist(Register DstReg,
                                               MachineRegisterInfo &MRI,
                                               SetVectorType &Worklist) const {
6100 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
6101 E = MRI.use_end(); I != E;) {
6102 MachineInstr &UseMI = *I->getParent();
6103
6104 unsigned OpNo = 0;
6105
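    // For copy-like instructions check the result register class (operand 0);
    // for anything else check the class required by the operand that actually
    // uses DstReg.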
6106 switch (UseMI.getOpcode()) {
6107 case AMDGPU::COPY:
6108 case AMDGPU::WQM:
6109 case AMDGPU::SOFT_WQM:
6110 case AMDGPU::WWM:
6111 case AMDGPU::REG_SEQUENCE:
6112 case AMDGPU::PHI:
6113 case AMDGPU::INSERT_SUBREG:
6114 break;
6115 default:
6116 OpNo = I.getOperandNo();
6117 break;
6118 }
6119
6120 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) {
6121 Worklist.insert(&UseMI);
6122
6123 do {
6124 ++I;
6125 } while (I != E && I->getParent() == &UseMI);
6126 } else {
6127 ++I;
6128 }
6129 }
6130 }
6131
void SIInstrInfo::movePackToVALU(SetVectorType &Worklist,
                                 MachineRegisterInfo &MRI,
                                 MachineInstr &Inst) const {
6135 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6136 MachineBasicBlock *MBB = Inst.getParent();
6137 MachineOperand &Src0 = Inst.getOperand(1);
6138 MachineOperand &Src1 = Inst.getOperand(2);
6139 const DebugLoc &DL = Inst.getDebugLoc();
6140
6141 switch (Inst.getOpcode()) {
6142 case AMDGPU::S_PACK_LL_B32_B16: {
6143 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6144 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6145
6146 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are
6147 // 0.
6148 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
6149 .addImm(0xffff);
6150
6151 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg)
6152 .addReg(ImmReg, RegState::Kill)
6153 .add(Src0);
6154
6155 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg)
6156 .add(Src1)
6157 .addImm(16)
6158 .addReg(TmpReg, RegState::Kill);
6159 break;
6160 }
6161 case AMDGPU::S_PACK_LH_B32_B16: {
6162 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6163 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
6164 .addImm(0xffff);
6165 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg)
6166 .addReg(ImmReg, RegState::Kill)
6167 .add(Src0)
6168 .add(Src1);
6169 break;
6170 }
6171 case AMDGPU::S_PACK_HH_B32_B16: {
6172 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6173 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6174 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
6175 .addImm(16)
6176 .add(Src0);
6177 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
6178 .addImm(0xffff0000);
6179 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg)
6180 .add(Src1)
6181 .addReg(ImmReg, RegState::Kill)
6182 .addReg(TmpReg, RegState::Kill);
6183 break;
6184 }
6185 default:
6186 llvm_unreachable("unhandled s_pack_* instruction");
6187 }
6188
6189 MachineOperand &Dest = Inst.getOperand(0);
6190 MRI.replaceRegWith(Dest.getReg(), ResultReg);
6191 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6192 }
6193
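// Walk the block after SCCDefInst and add every instruction that reads its
// SCC result to the VALU worklist. Users reached through a COPY of SCC are
// rewritten to read VCC directly and the copy is deleted; the scan stops at
// the next definition of SCC.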
6194 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
6195 MachineInstr &SCCDefInst,
6196 SetVectorType &Worklist) const {
6197 bool SCCUsedImplicitly = false;
6198
6199 // Ensure that def inst defines SCC, which is still live.
6200 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
6201 !Op.isDead() && Op.getParent() == &SCCDefInst);
6202 SmallVector<MachineInstr *, 4> CopyToDelete;
6203 // This assumes that all the users of SCC are in the same block
6204 // as the SCC def.
6205 for (MachineInstr &MI : // Skip the def inst itself.
6206 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)),
6207 SCCDefInst.getParent()->end())) {
6208 // Check if SCC is used first.
6209 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) {
6210 if (MI.isCopy()) {
6211 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
6212 unsigned DestReg = MI.getOperand(0).getReg();
6213
6214 for (auto &User : MRI.use_nodbg_instructions(DestReg)) {
6215 if ((User.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) ||
6216 (User.getOpcode() == AMDGPU::S_SUB_CO_PSEUDO)) {
6217 User.getOperand(4).setReg(RI.getVCC());
6218 Worklist.insert(&User);
6219 } else if (User.getOpcode() == AMDGPU::V_CNDMASK_B32_e64) {
6220 User.getOperand(5).setReg(RI.getVCC());
6221 // No need to add to Worklist.
6222 }
6223 }
6224 CopyToDelete.push_back(&MI);
6225 } else {
6226 if (MI.getOpcode() == AMDGPU::S_CSELECT_B32 ||
6227 MI.getOpcode() == AMDGPU::S_CSELECT_B64) {
6228 // This is an implicit use of SCC, which the SCC users are expected
6229 // to handle.
6230 // We cannot preserve the edge to the user so add the explicit
6231 // copy: SCC = COPY VCC.
6232 // The copy will be cleaned up during the processing of the user
6233 // in lowerSelect.
6234 SCCUsedImplicitly = true;
6235 }
6236
6237 Worklist.insert(&MI);
6238 }
6239 }
6240 // Exit if we find another SCC def.
6241 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
6242 break;
6243 }
6244 for (auto &Copy : CopyToDelete)
6245 Copy->eraseFromParent();
6246
6247 if (SCCUsedImplicitly) {
6248 BuildMI(*SCCDefInst.getParent(), std::next(SCCDefInst.getIterator()),
6249 SCCDefInst.getDebugLoc(), get(AMDGPU::COPY), AMDGPU::SCC)
6250 .addReg(RI.getVCC());
6251 }
6252 }
6253
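// Return the register class the result of Inst should get once the
// instruction is moved to the VALU. For copy-like pseudos this is the VGPR
// (or AGPR) class equivalent to the current result class, or nullptr when
// the result is already a vector class or no equivalent exists; all other
// instructions keep their current class.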
6254 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
6255 const MachineInstr &Inst) const {
6256 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
6257
6258 switch (Inst.getOpcode()) {
6259 // For target instructions, getOpRegClass just returns the virtual register
6260 // class associated with the operand, so we need to find an equivalent VGPR
6261 // register class in order to move the instruction to the VALU.
6262 case AMDGPU::COPY:
6263 case AMDGPU::PHI:
6264 case AMDGPU::REG_SEQUENCE:
6265 case AMDGPU::INSERT_SUBREG:
6266 case AMDGPU::WQM:
6267 case AMDGPU::SOFT_WQM:
6268 case AMDGPU::WWM: {
6269 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1);
6270 if (RI.hasAGPRs(SrcRC)) {
6271 if (RI.hasAGPRs(NewDstRC))
6272 return nullptr;
6273
6274 switch (Inst.getOpcode()) {
6275 case AMDGPU::PHI:
6276 case AMDGPU::REG_SEQUENCE:
6277 case AMDGPU::INSERT_SUBREG:
6278 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
6279 break;
6280 default:
6281 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
6282 }
6283
6284 if (!NewDstRC)
6285 return nullptr;
6286 } else {
6287 if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
6288 return nullptr;
6289
6290 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
6291 if (!NewDstRC)
6292 return nullptr;
6293 }
6294
6295 return NewDstRC;
6296 }
6297 default:
6298 return NewDstRC;
6299 }
6300 }
6301
6302 // Find the one SGPR operand we are allowed to use.
6303 Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
6304 int OpIndices[3]) const {
6305 const MCInstrDesc &Desc = MI.getDesc();
6306
6307 // Find the one SGPR operand we are allowed to use.
6308 //
6309 // First we need to consider the instruction's operand requirements before
6310 // legalizing. Some operands are required to be SGPRs, such as implicit uses
6311 // of VCC, but we are still bound by the constant bus requirement to only use
6312 // one.
6313 //
6314 // If the operand's class is an SGPR, we can never move it.
6315
6316 Register SGPRReg = findImplicitSGPRRead(MI);
6317 if (SGPRReg != AMDGPU::NoRegister)
6318 return SGPRReg;
6319
6320 Register UsedSGPRs[3] = { AMDGPU::NoRegister };
6321 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
6322
6323 for (unsigned i = 0; i < 3; ++i) {
6324 int Idx = OpIndices[i];
6325 if (Idx == -1)
6326 break;
6327
6328 const MachineOperand &MO = MI.getOperand(Idx);
6329 if (!MO.isReg())
6330 continue;
6331
6332 // Is this operand statically required to be an SGPR based on the operand
6333 // constraints?
6334 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
6335 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
6336 if (IsRequiredSGPR)
6337 return MO.getReg();
6338
6339 // If this could be a VGPR or an SGPR, check the dynamic register class.
6340 Register Reg = MO.getReg();
6341 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
6342 if (RI.isSGPRClass(RegRC))
6343 UsedSGPRs[i] = Reg;
6344 }
6345
6346 // We don't have a required SGPR operand, so we have a bit more freedom in
6347 // selecting operands to move.
6348
6349 // Try to select the most used SGPR. If an SGPR is equal to one of the
6350 // others, we choose that.
6351 //
6352 // e.g.
6353 // V_FMA_F32 v0, s0, s0, s0 -> No moves
6354 // V_FMA_F32 v0, s0, s1, s0 -> Move s1
6355
6356 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
6357 // prefer those.
6358
6359 if (UsedSGPRs[0] != AMDGPU::NoRegister) {
6360 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
6361 SGPRReg = UsedSGPRs[0];
6362 }
6363
6364 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
6365 if (UsedSGPRs[1] == UsedSGPRs[2])
6366 SGPRReg = UsedSGPRs[1];
6367 }
6368
6369 return SGPRReg;
6370 }
6371
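// Return the operand of MI with the given operand name, or nullptr if the
// instruction does not have such an operand.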
6372 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
6373 unsigned OperandName) const {
6374 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
6375 if (Idx == -1)
6376 return nullptr;
6377
6378 return &MI.getOperand(Idx);
6379 }
6380
6381 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
6382 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
6383 return (22ULL << 44) | // IMG_FORMAT_32_FLOAT
6384 (1ULL << 56) | // RESOURCE_LEVEL = 1
6385 (3ULL << 60); // OOB_SELECT = 3
6386 }
6387
6388 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
6389 if (ST.isAmdHsaOS()) {
6390 // Set ATC = 1. GFX9 doesn't have this bit.
6391 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
6392 RsrcDataFormat |= (1ULL << 56);
6393
6394 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
6395 // BTW, it disables TC L2 and therefore decreases performance.
6396 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
6397 RsrcDataFormat |= (2ULL << 59);
6398 }
6399
6400 return RsrcDataFormat;
6401 }
6402
6403 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
6404 uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
6405 AMDGPU::RSRC_TID_ENABLE |
6406 0xffffffff; // Size;
6407
6408 // GFX9 doesn't have ELEMENT_SIZE.
6409 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
6410 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
6411 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
6412 }
6413
6414 // IndexStride = 64 for wave64, 32 for wave32 (encoded as 3 or 2).
6415 uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2;
6416 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;
6417
6418 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
6419 // Clear them unless we want a huge stride.
6420 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
6421 ST.getGeneration() <= AMDGPUSubtarget::GFX9)
6422 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
6423
6424 return Rsrc23;
6425 }
6426
6427 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
6428 unsigned Opc = MI.getOpcode();
6429
6430 return isSMRD(Opc);
6431 }
6432
6433 bool SIInstrInfo::isHighLatencyDef(int Opc) const {
6434 return get(Opc).mayLoad() &&
6435 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc));
6436 }
6437
6438 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
6439 int &FrameIndex) const {
6440 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
6441 if (!Addr || !Addr->isFI())
6442 return AMDGPU::NoRegister;
6443
6444 assert(!MI.memoperands_empty() &&
6445 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
6446
6447 FrameIndex = Addr->getIndex();
6448 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
6449 }
6450
6451 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
6452 int &FrameIndex) const {
6453 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
6454 assert(Addr && Addr->isFI());
6455 FrameIndex = Addr->getIndex();
6456 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
6457 }
6458
6459 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
6460 int &FrameIndex) const {
6461 if (!MI.mayLoad())
6462 return AMDGPU::NoRegister;
6463
6464 if (isMUBUF(MI) || isVGPRSpill(MI))
6465 return isStackAccess(MI, FrameIndex);
6466
6467 if (isSGPRSpill(MI))
6468 return isSGPRStackAccess(MI, FrameIndex);
6469
6470 return AMDGPU::NoRegister;
6471 }
6472
6473 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
6474 int &FrameIndex) const {
6475 if (!MI.mayStore())
6476 return AMDGPU::NoRegister;
6477
6478 if (isMUBUF(MI) || isVGPRSpill(MI))
6479 return isStackAccess(MI, FrameIndex);
6480
6481 if (isSGPRSpill(MI))
6482 return isSGPRStackAccess(MI, FrameIndex);
6483
6484 return AMDGPU::NoRegister;
6485 }
6486
6487 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
6488 unsigned Size = 0;
6489 MachineBasicBlock::const_instr_iterator I = MI.getIterator();
6490 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
6491 while (++I != E && I->isInsideBundle()) {
6492 assert(!I->isBundle() && "No nested bundle!");
6493 Size += getInstSizeInBytes(*I);
6494 }
6495
6496 return Size;
6497 }
6498
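// Return the encoded size of MI in bytes. VALU/SALU encodings grow by four
// bytes when a source operand requires a 32-bit literal, MIMG NSA forms add
// one dword per group of up to four extra address registers, and meta
// instructions (KILL, DBG_VALUE, ...) occupy no space.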
6499 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
6500 unsigned Opc = MI.getOpcode();
6501 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
6502 unsigned DescSize = Desc.getSize();
6503
6504 // If we have a definitive size, we can use it. Otherwise we need to inspect
6505 // the operands to know the size.
6506 if (isFixedSize(MI))
6507 return DescSize;
6508
6509 // 4-byte instructions may have a 32-bit literal encoded after them. Check
6510 // operands that could ever be literals.
6511 if (isVALU(MI) || isSALU(MI)) {
6512 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
6513 if (Src0Idx == -1)
6514 return DescSize; // No operands.
6515
6516 if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
6517 return isVOP3(MI) ? 12 : (DescSize + 4);
6518
6519 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
6520 if (Src1Idx == -1)
6521 return DescSize;
6522
6523 if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
6524 return isVOP3(MI) ? 12 : (DescSize + 4);
6525
6526 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
6527 if (Src2Idx == -1)
6528 return DescSize;
6529
6530 if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx]))
6531 return isVOP3(MI) ? 12 : (DescSize + 4);
6532
6533 return DescSize;
6534 }
6535
6536 // Check whether we have extra NSA words.
6537 if (isMIMG(MI)) {
6538 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
6539 if (VAddr0Idx < 0)
6540 return 8;
6541
6542 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
6543 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4);
6544 }
6545
6546 switch (Opc) {
6547 case TargetOpcode::IMPLICIT_DEF:
6548 case TargetOpcode::KILL:
6549 case TargetOpcode::DBG_VALUE:
6550 case TargetOpcode::EH_LABEL:
6551 return 0;
6552 case TargetOpcode::BUNDLE:
6553 return getInstBundleSize(MI);
6554 case TargetOpcode::INLINEASM:
6555 case TargetOpcode::INLINEASM_BR: {
6556 const MachineFunction *MF = MI.getParent()->getParent();
6557 const char *AsmStr = MI.getOperand(0).getSymbolName();
6558 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(),
6559 &MF->getSubtarget());
6560 }
6561 default:
6562 return DescSize;
6563 }
6564 }
6565
6566 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
6567 if (!isFLAT(MI))
6568 return false;
6569
6570 if (MI.memoperands_empty())
6571 return true;
6572
6573 for (const MachineMemOperand *MMO : MI.memoperands()) {
6574 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
6575 return true;
6576 }
6577 return false;
6578 }
6579
6580 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const {
6581 return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO;
6582 }
6583
6584 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
6585 MachineBasicBlock *IfEnd) const {
6586 MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator();
6587 assert(TI != IfEntry->end());
6588
6589 MachineInstr *Branch = &(*TI);
6590 MachineFunction *MF = IfEntry->getParent();
6591 MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();
6592
6593 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
6594 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
6595 MachineInstr *SIIF =
6596 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
6597 .add(Branch->getOperand(0))
6598 .add(Branch->getOperand(1));
6599 MachineInstr *SIEND =
6600 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF))
6601 .addReg(DstReg);
6602
6603 IfEntry->erase(TI);
6604 IfEntry->insert(IfEntry->end(), SIIF);
6605 IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND);
6606 }
6607 }
6608
6609 void SIInstrInfo::convertNonUniformLoopRegion(
6610 MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
6611 MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
6612 // We expect 2 terminators, one conditional and one unconditional.
6613 assert(TI != LoopEnd->end());
6614
6615 MachineInstr *Branch = &(*TI);
6616 MachineFunction *MF = LoopEnd->getParent();
6617 MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo();
6618
6619 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
6620
6621 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
6622 Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC());
6623 MachineInstrBuilder HeaderPHIBuilder =
6624 BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
6625 for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
6626 E = LoopEntry->pred_end();
6627 PI != E; ++PI) {
6628 if (*PI == LoopEnd) {
6629 HeaderPHIBuilder.addReg(BackEdgeReg);
6630 } else {
6631 MachineBasicBlock *PMBB = *PI;
6632 Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC());
6633 materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
6634 ZeroReg, 0);
6635 HeaderPHIBuilder.addReg(ZeroReg);
6636 }
6637 HeaderPHIBuilder.addMBB(*PI);
6638 }
6639 MachineInstr *HeaderPhi = HeaderPHIBuilder;
6640 MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(),
6641 get(AMDGPU::SI_IF_BREAK), BackEdgeReg)
6642 .addReg(DstReg)
6643 .add(Branch->getOperand(0));
6644 MachineInstr *SILOOP =
6645 BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP))
6646 .addReg(BackEdgeReg)
6647 .addMBB(LoopEntry);
6648
6649 LoopEntry->insert(LoopEntry->begin(), HeaderPhi);
6650 LoopEnd->erase(TI);
6651 LoopEnd->insert(LoopEnd->end(), SIIFBREAK);
6652 LoopEnd->insert(LoopEnd->end(), SILOOP);
6653 }
6654 }
6655
6656 ArrayRef<std::pair<int, const char *>>
6657 SIInstrInfo::getSerializableTargetIndices() const {
6658 static const std::pair<int, const char *> TargetIndices[] = {
6659 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
6660 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
6661 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
6662 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
6663 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
6664 return makeArrayRef(TargetIndices);
6665 }
6666
6667 /// This is used by the post-RA scheduler (PostRASchedulerList.cpp). The
6668 /// post-RA version of misched uses CreateTargetMIHazardRecognizer.
6669 ScheduleHazardRecognizer *
6670 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
6671 const ScheduleDAG *DAG) const {
6672 return new GCNHazardRecognizer(DAG->MF);
6673 }
6674
6675 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
6676 /// pass.
6677 ScheduleHazardRecognizer *
6678 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
6679 return new GCNHazardRecognizer(MF);
6680 }
6681
6682 std::pair<unsigned, unsigned>
6683 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
6684 return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
6685 }
6686
6687 ArrayRef<std::pair<unsigned, const char *>>
6688 SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
6689 static const std::pair<unsigned, const char *> TargetFlags[] = {
6690 { MO_GOTPCREL, "amdgpu-gotprel" },
6691 { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" },
6692 { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" },
6693 { MO_REL32_LO, "amdgpu-rel32-lo" },
6694 { MO_REL32_HI, "amdgpu-rel32-hi" },
6695 { MO_ABS32_LO, "amdgpu-abs32-lo" },
6696 { MO_ABS32_HI, "amdgpu-abs32-hi" },
6697 };
6698
6699 return makeArrayRef(TargetFlags);
6700 }
6701
6702 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
6703 return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
6704 MI.modifiesRegister(AMDGPU::EXEC, &RI);
6705 }
6706
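// Start building a 32-bit integer add that has no usable carry-out:
// V_ADD_U32 on subtargets with add-no-carry, otherwise V_ADD_I32 with a
// dead scratch carry register (a fresh virtual register here; the
// RegScavenger overload below scavenges one instead).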
6707 MachineInstrBuilder
6708 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
6709 MachineBasicBlock::iterator I,
6710 const DebugLoc &DL,
6711 Register DestReg) const {
6712 if (ST.hasAddNoCarry())
6713 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);
6714
6715 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6716 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
6717 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());
6718
6719 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg)
6720 .addReg(UnusedCarry, RegState::Define | RegState::Dead);
6721 }
6722
6723 MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
6724 MachineBasicBlock::iterator I,
6725 const DebugLoc &DL,
6726 Register DestReg,
6727 RegScavenger &RS) const {
6728 if (ST.hasAddNoCarry())
6729 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg);
6730
6731 // If available, prefer to use vcc.
6732 Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC)
6733 ? Register(RI.getVCC())
6734 : RS.scavengeRegister(RI.getBoolRC(), I, 0, false);
6735
6736 // TODO: Users need to deal with this.
6737 if (!UnusedCarry.isValid())
6738 return MachineInstrBuilder();
6739
6740 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg)
6741 .addReg(UnusedCarry, RegState::Define | RegState::Dead);
6742 }
6743
6744 bool SIInstrInfo::isKillTerminator(unsigned Opcode) {
6745 switch (Opcode) {
6746 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
6747 case AMDGPU::SI_KILL_I1_TERMINATOR:
6748 return true;
6749 default:
6750 return false;
6751 }
6752 }
6753
6754 const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const {
6755 switch (Opcode) {
6756 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
6757 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
6758 case AMDGPU::SI_KILL_I1_PSEUDO:
6759 return get(AMDGPU::SI_KILL_I1_TERMINATOR);
6760 default:
6761 llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO");
6762 }
6763 }
6764
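// On wave32 subtargets, rewrite implicit VCC operands to VCC_LO, the 32-bit
// register actually used as the condition register in wave32 mode.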
6765 void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const {
6766 MachineBasicBlock *MBB = MI.getParent();
6767 MachineFunction *MF = MBB->getParent();
6768 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
6769
6770 if (!ST.isWave32())
6771 return;
6772
6773 for (auto &Op : MI.implicit_operands()) {
6774 if (Op.isReg() && Op.getReg() == AMDGPU::VCC)
6775 Op.setReg(AMDGPU::VCC_LO);
6776 }
6777 }
6778
6779 bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
6780 if (!isSMRD(MI))
6781 return false;
6782
6783 // Check that it is using a buffer resource.
6784 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
6785 if (Idx == -1) // e.g. s_memtime
6786 return false;
6787
6788 const auto RCID = MI.getDesc().OpInfo[Idx].RegClass;
6789 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
6790 }
6791
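// Number of bits available for an immediate offset on FLAT instructions in
// the given address space: 0 when the subtarget lacks FLAT offsets (or the
// flat segment-offset bug applies), 12/11 bits (signed/unsigned) on GFX10,
// and 13/12 bits on earlier subtargets.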
6792 unsigned SIInstrInfo::getNumFlatOffsetBits(unsigned AddrSpace,
6793 bool Signed) const {
6794 if (!ST.hasFlatInstOffsets())
6795 return 0;
6796
6797 if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS)
6798 return 0;
6799
6800 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10)
6801 return Signed ? 12 : 11;
6802
6803 return Signed ? 13 : 12;
6804 }
6805
6806 bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace,
6807 bool Signed) const {
6808 // TODO: Should 0 be special cased?
6809 if (!ST.hasFlatInstOffsets())
6810 return false;
6811
6812 if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS)
6813 return false;
6814
6815 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
6816 return (Signed && isInt<12>(Offset)) ||
6817 (!Signed && isUInt<11>(Offset));
6818 }
6819
6820 return (Signed && isInt<13>(Offset)) ||
6821 (!Signed && isUInt<12>(Offset));
6822 }
6823
6824
6825 // This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
6826 enum SIEncodingFamily {
6827 SI = 0,
6828 VI = 1,
6829 SDWA = 2,
6830 SDWA9 = 3,
6831 GFX80 = 4,
6832 GFX9 = 5,
6833 GFX10 = 6,
6834 SDWA10 = 7
6835 };
6836
6837 static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
6838 switch (ST.getGeneration()) {
6839 default:
6840 break;
6841 case AMDGPUSubtarget::SOUTHERN_ISLANDS:
6842 case AMDGPUSubtarget::SEA_ISLANDS:
6843 return SIEncodingFamily::SI;
6844 case AMDGPUSubtarget::VOLCANIC_ISLANDS:
6845 case AMDGPUSubtarget::GFX9:
6846 return SIEncodingFamily::VI;
6847 case AMDGPUSubtarget::GFX10:
6848 return SIEncodingFamily::GFX10;
6849 }
6850 llvm_unreachable("Unknown subtarget generation!");
6851 }
6852
6853 bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
6854 switch(MCOp) {
6855 // These opcodes use indirect register addressing so
6856 // they need special handling by codegen (currently missing).
6857 // Therefore it is too risky to allow these opcodes
6858 // to be selected by dpp combiner or sdwa peepholer.
6859 case AMDGPU::V_MOVRELS_B32_dpp_gfx10:
6860 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
6861 case AMDGPU::V_MOVRELD_B32_dpp_gfx10:
6862 case AMDGPU::V_MOVRELD_B32_sdwa_gfx10:
6863 case AMDGPU::V_MOVRELSD_B32_dpp_gfx10:
6864 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
6865 case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10:
6866 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
6867 return true;
6868 default:
6869 return false;
6870 }
6871 }
6872
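// Map a pseudo opcode to the MC opcode for the current subtarget's encoding
// family. Returns the input opcode if it is already a native instruction,
// and -1 if the pseudo has no encoding on this subtarget or the result is
// an assembler-only opcode.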
6873 int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
6874 SIEncodingFamily Gen = subtargetEncodingFamily(ST);
6875
6876 if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
6877 ST.getGeneration() == AMDGPUSubtarget::GFX9)
6878 Gen = SIEncodingFamily::GFX9;
6879
6880 // Adjust the encoding family to GFX80 for D16 buffer instructions when the
6881 // subtarget has UnpackedD16VMem feature.
6882 // TODO: remove this when we discard GFX80 encoding.
6883 if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
6884 Gen = SIEncodingFamily::GFX80;
6885
6886 if (get(Opcode).TSFlags & SIInstrFlags::SDWA) {
6887 switch (ST.getGeneration()) {
6888 default:
6889 Gen = SIEncodingFamily::SDWA;
6890 break;
6891 case AMDGPUSubtarget::GFX9:
6892 Gen = SIEncodingFamily::SDWA9;
6893 break;
6894 case AMDGPUSubtarget::GFX10:
6895 Gen = SIEncodingFamily::SDWA10;
6896 break;
6897 }
6898 }
6899
6900 int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);
6901
6902 // -1 means that Opcode is already a native instruction.
6903 if (MCOp == -1)
6904 return Opcode;
6905
6906 // (uint16_t)-1 means that Opcode is a pseudo instruction that has
6907 // no encoding in the given subtarget generation.
6908 if (MCOp == (uint16_t)-1)
6909 return -1;
6910
6911 if (isAsmOnlyOpcode(MCOp))
6912 return -1;
6913
6914 return MCOp;
6915 }
6916
6917 static
6918 TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
6919 assert(RegOpnd.isReg());
6920 return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
6921 getRegSubRegPair(RegOpnd);
6922 }
6923
6924 TargetInstrInfo::RegSubRegPair
6925 llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
6926 assert(MI.isRegSequence());
6927 for (unsigned I = 0, E = (MI.getNumOperands() - 1)/ 2; I < E; ++I)
6928 if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
6929 auto &RegOp = MI.getOperand(1 + 2 * I);
6930 return getRegOrUndef(RegOp);
6931 }
6932 return TargetInstrInfo::RegSubRegPair();
6933 }
6934
6935 // Try to find the definition of reg:subreg in subreg-manipulation pseudos
6936 // Following a subreg of reg:subreg isn't supported
6937 static bool followSubRegDef(MachineInstr &MI,
6938 TargetInstrInfo::RegSubRegPair &RSR) {
6939 if (!RSR.SubReg)
6940 return false;
6941 switch (MI.getOpcode()) {
6942 default: break;
6943 case AMDGPU::REG_SEQUENCE:
6944 RSR = getRegSequenceSubReg(MI, RSR.SubReg);
6945 return true;
6946 // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg
6947 case AMDGPU::INSERT_SUBREG:
6948 if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
6949 // inserted the subreg we're looking for
6950 RSR = getRegOrUndef(MI.getOperand(2));
6951 else { // the subreg in the rest of the reg
6952 auto R1 = getRegOrUndef(MI.getOperand(1));
6953 if (R1.SubReg) // subreg of subreg isn't supported
6954 return false;
6955 RSR.Reg = R1.Reg;
6956 }
6957 return true;
6958 }
6959 return false;
6960 }
6961
6962 MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
6963 MachineRegisterInfo &MRI) {
6964 assert(MRI.isSSA());
6965 if (!Register::isVirtualRegister(P.Reg))
6966 return nullptr;
6967
6968 auto RSR = P;
6969 auto *DefInst = MRI.getVRegDef(RSR.Reg);
6970 while (auto *MI = DefInst) {
6971 DefInst = nullptr;
6972 switch (MI->getOpcode()) {
6973 case AMDGPU::COPY:
6974 case AMDGPU::V_MOV_B32_e32: {
6975 auto &Op1 = MI->getOperand(1);
6976 if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg())) {
6977 if (Op1.isUndef())
6978 return nullptr;
6979 RSR = getRegSubRegPair(Op1);
6980 DefInst = MRI.getVRegDef(RSR.Reg);
6981 }
6982 break;
6983 }
6984 default:
6985 if (followSubRegDef(*MI, RSR)) {
6986 if (!RSR.Reg)
6987 return nullptr;
6988 DefInst = MRI.getVRegDef(RSR.Reg);
6989 }
6990 }
6991 if (!DefInst)
6992 return MI;
6993 }
6994 return nullptr;
6995 }
6996
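// Conservatively determine whether EXEC may be modified between DefMI and
// UseMI. Only a short scan within the defining block is performed
// (MaxInstScan instructions); cross-block uses or longer distances are
// assumed to modify EXEC.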
6997 bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
6998 Register VReg,
6999 const MachineInstr &DefMI,
7000 const MachineInstr &UseMI) {
7001 assert(MRI.isSSA() && "Must be run on SSA");
7002
7003 auto *TRI = MRI.getTargetRegisterInfo();
7004 auto *DefBB = DefMI.getParent();
7005
7006 // Don't bother searching between blocks, although it is possible this block
7007 // doesn't modify exec.
7008 if (UseMI.getParent() != DefBB)
7009 return true;
7010
7011 const int MaxInstScan = 20;
7012 int NumInst = 0;
7013
7014 // Stop scan at the use.
7015 auto E = UseMI.getIterator();
7016 for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
7017 if (I->isDebugInstr())
7018 continue;
7019
7020 if (++NumInst > MaxInstScan)
7021 return true;
7022
7023 if (I->modifiesRegister(AMDGPU::EXEC, TRI))
7024 return true;
7025 }
7026
7027 return false;
7028 }
7029
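// As above, but considers every use of VReg: returns true (may be modified)
// if any use is in another block, if there are too many uses to scan, or if
// an EXEC write is seen before the last use.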
7030 bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI,
7031 Register VReg,
7032 const MachineInstr &DefMI) {
7033 assert(MRI.isSSA() && "Must be run on SSA");
7034
7035 auto *TRI = MRI.getTargetRegisterInfo();
7036 auto *DefBB = DefMI.getParent();
7037
7038 const int MaxUseInstScan = 10;
7039 int NumUseInst = 0;
7040
7041 for (auto &UseInst : MRI.use_nodbg_instructions(VReg)) {
7042 // Don't bother searching between blocks, although it is possible this block
7043 // doesn't modify exec.
7044 if (UseInst.getParent() != DefBB)
7045 return true;
7046
7047 if (++NumUseInst > MaxUseInstScan)
7048 return true;
7049 }
7050
7051 const int MaxInstScan = 20;
7052 int NumInst = 0;
7053
7054 // Stop scan when we have seen all the uses.
7055 for (auto I = std::next(DefMI.getIterator()); ; ++I) {
7056 if (I->isDebugInstr())
7057 continue;
7058
7059 if (++NumInst > MaxInstScan)
7060 return true;
7061
7062 if (I->readsRegister(VReg))
7063 if (--NumUseInst == 0)
7064 return false;
7065
7066 if (I->modifiesRegister(AMDGPU::EXEC, TRI))
7067 return true;
7068 }
7069 }
7070
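// Insert the copy for a PHI destination. If a non-PHI instruction before
// LastPHIIt already reads Dst, the copy is placed directly before that
// reader; otherwise fall back to the generic implementation.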
7071 MachineInstr *SIInstrInfo::createPHIDestinationCopy(
7072 MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt,
7073 const DebugLoc &DL, Register Src, Register Dst) const {
7074 auto Cur = MBB.begin();
7075 if (Cur != MBB.end())
7076 do {
7077 if (!Cur->isPHI() && Cur->readsRegister(Dst))
7078 return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
7079 ++Cur;
7080 } while (Cur != MBB.end() && Cur != LastPHIIt);
7081
7082 return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src,
7083 Dst);
7084 }
7085
7086 MachineInstr *SIInstrInfo::createPHISourceCopy(
7087 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
7088 const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const {
7089 if (InsPt != MBB.end() &&
7090 (InsPt->getOpcode() == AMDGPU::SI_IF ||
7091 InsPt->getOpcode() == AMDGPU::SI_ELSE ||
7092 InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
7093 InsPt->definesRegister(Src)) {
7094 InsPt++;
7095 return BuildMI(MBB, InsPt, DL,
7096 get(ST.isWave32() ? AMDGPU::S_MOV_B32_term
7097 : AMDGPU::S_MOV_B64_term),
7098 Dst)
7099 .addReg(Src, 0, SrcSubReg)
7100 .addReg(AMDGPU::EXEC, RegState::Implicit);
7101 }
7102 return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg,
7103 Dst);
7104 }
7105
7106 bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); }
7107
7108 MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
7109 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
7110 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
7111 VirtRegMap *VRM) const {
7112 // This is a bit of a hack (copied from AArch64). Consider this instruction:
7113 //
7114 // %0:sreg_32 = COPY $m0
7115 //
7116 // We explicitly chose SReg_32 for the virtual register so such a copy might
7117 // be eliminated by RegisterCoalescer. However, that may not be possible, and
7118 // %0 may even spill. We can't spill $m0 normally (it would require copying to
7119 // a numbered SGPR anyway), and since it is in the SReg_32 register class,
7120 // TargetInstrInfo::foldMemoryOperand() is going to try.
7121 // A similar issue also exists with spilling and reloading $exec registers.
7122 //
7123 // To prevent that, constrain the %0 register class here.
7124 if (MI.isFullCopy()) {
7125 Register DstReg = MI.getOperand(0).getReg();
7126 Register SrcReg = MI.getOperand(1).getReg();
7127 if ((DstReg.isVirtual() || SrcReg.isVirtual()) &&
7128 (DstReg.isVirtual() != SrcReg.isVirtual())) {
7129 MachineRegisterInfo &MRI = MF.getRegInfo();
7130 Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg;
7131 const TargetRegisterClass *RC = MRI.getRegClass(VirtReg);
7132 if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) {
7133 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
7134 return nullptr;
7135 } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) {
7136 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass);
7137 return nullptr;
7138 }
7139 }
7140 }
7141
7142 return nullptr;
7143 }
7144
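// Latency of MI in scheduler cycles. A BUNDLE is approximated by the
// maximum latency among its bundled instructions plus one cycle for each
// additional instruction in the bundle.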
7145 unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
7146 const MachineInstr &MI,
7147 unsigned *PredCost) const {
7148 if (MI.isBundle()) {
7149 MachineBasicBlock::const_instr_iterator I(MI.getIterator());
7150 MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end());
7151 unsigned Lat = 0, Count = 0;
7152 for (++I; I != E && I->isBundledWithPred(); ++I) {
7153 ++Count;
7154 Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
7155 }
7156 return Lat + Count - 1;
7157 }
7158
7159 return SchedModel.computeInstrLatency(&MI);
7160 }
7161