//===-- GCNHazardRecognizers.cpp - GCN Hazard Recognizer Impls ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements hazard recognizers for scheduling on GCN processors.
//
//===----------------------------------------------------------------------===//

#include "GCNHazardRecognizer.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/TargetParser/TargetParser.h"

using namespace llvm;

namespace {

struct MFMAPaddingRatioParser : public cl::parser<unsigned> {
  MFMAPaddingRatioParser(cl::Option &O) : cl::parser<unsigned>(O) {}

  bool parse(cl::Option &O, StringRef ArgName, StringRef Arg, unsigned &Value) {
    if (Arg.getAsInteger(0, Value))
      return O.error("'" + Arg + "' value invalid for uint argument!");

    if (Value > 100)
      return O.error("'" + Arg + "' value must be in the range [0, 100]!");

    return false;
  }
};

} // end anonymous namespace

static cl::opt<unsigned, false, MFMAPaddingRatioParser>
    MFMAPaddingRatio("amdgpu-mfma-padding-ratio", cl::init(0), cl::Hidden,
                     cl::desc("Fill a percentage of the latency between "
                              "neighboring MFMA with s_nops."));

//===----------------------------------------------------------------------===//
// Hazard Recognizer Implementation
//===----------------------------------------------------------------------===//

static bool shouldRunLdsBranchVmemWARHazardFixup(const MachineFunction &MF,
                                                 const GCNSubtarget &ST);

GCNHazardRecognizer::GCNHazardRecognizer(const MachineFunction &MF) :
  IsHazardRecognizerMode(false),
  CurrCycleInstr(nullptr),
  MF(MF),
  ST(MF.getSubtarget<GCNSubtarget>()),
  TII(*ST.getInstrInfo()),
  TRI(TII.getRegisterInfo()),
  ClauseUses(TRI.getNumRegUnits()),
  ClauseDefs(TRI.getNumRegUnits()) {
  MaxLookAhead = MF.getRegInfo().isPhysRegUsed(AMDGPU::AGPR0) ? 19 : 5;
  TSchedModel.init(&ST);
  RunLdsBranchVmemWARHazardFixup = shouldRunLdsBranchVmemWARHazardFixup(MF, ST);
}

void GCNHazardRecognizer::Reset() {
  EmittedInstrs.clear();
}

void GCNHazardRecognizer::EmitInstruction(SUnit *SU) {
  EmitInstruction(SU->getInstr());
}

void GCNHazardRecognizer::EmitInstruction(MachineInstr *MI) {
  CurrCycleInstr = MI;
}

static bool isDivFMas(unsigned Opcode) {
  return Opcode == AMDGPU::V_DIV_FMAS_F32_e64 || Opcode == AMDGPU::V_DIV_FMAS_F64_e64;
}

static bool isSGetReg(unsigned Opcode) {
  return Opcode == AMDGPU::S_GETREG_B32;
}

static bool isSSetReg(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_SETREG_B32:
  case AMDGPU::S_SETREG_B32_mode:
  case AMDGPU::S_SETREG_IMM32_B32:
  case AMDGPU::S_SETREG_IMM32_B32_mode:
    return true;
  }
  return false;
}

static bool isRWLane(unsigned Opcode) {
  return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32;
}

static bool isRFE(unsigned Opcode) {
  return Opcode == AMDGPU::S_RFE_B64;
}

static bool isSMovRel(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_MOVRELS_B32:
  case AMDGPU::S_MOVRELS_B64:
  case AMDGPU::S_MOVRELD_B32:
  case AMDGPU::S_MOVRELD_B64:
    return true;
  default:
    return false;
  }
}

static bool isDGEMM(unsigned Opcode) {
  return AMDGPU::getMAIIsDGEMM(Opcode);
}

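// Returns true if \p MI issues to the XDL (MFMA) pipeline. DGEMM and the
// accvgpr read/write pseudos do not; on gfx940 the per-opcode table makes the
// final determination.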
static bool isXDL(const GCNSubtarget &ST, const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();

  if (!SIInstrInfo::isMAI(MI) ||
      isDGEMM(Opcode) ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_e64)
    return false;

  if (!ST.hasGFX940Insts())
    return true;

  return AMDGPU::getMAIIsGFX940XDL(Opcode);
}

static bool isSendMsgTraceDataOrGDS(const SIInstrInfo &TII,
                                    const MachineInstr &MI) {
  if (TII.isAlwaysGDS(MI.getOpcode()))
    return true;

  switch (MI.getOpcode()) {
  case AMDGPU::S_SENDMSG:
  case AMDGPU::S_SENDMSGHALT:
  case AMDGPU::S_TTRACEDATA:
    return true;
  // These DS opcodes don't support GDS.
  case AMDGPU::DS_NOP:
  case AMDGPU::DS_PERMUTE_B32:
  case AMDGPU::DS_BPERMUTE_B32:
    return false;
  default:
    if (TII.isDS(MI.getOpcode())) {
      int GDS = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::gds);
      if (MI.getOperand(GDS).getImm())
        return true;
    }
    return false;
  }
}

static bool isPermlane(const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  return Opcode == AMDGPU::V_PERMLANE16_B32_e64 ||
         Opcode == AMDGPU::V_PERMLANE64_B32 ||
         Opcode == AMDGPU::V_PERMLANEX16_B32_e64 ||
         Opcode == AMDGPU::V_PERMLANE16_VAR_B32_e64 ||
         Opcode == AMDGPU::V_PERMLANEX16_VAR_B32_e64;
}

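// LDS DMA operations are buffer/flat loads or stores that also carry the
// VALU flag; this predicate identifies them by that combination.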
static bool isLdsDma(const MachineInstr &MI) {
  return SIInstrInfo::isVALU(MI) &&
         (SIInstrInfo::isMUBUF(MI) || SIInstrInfo::isFLAT(MI));
}

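// Returns the hardware register id selected by the simm16 operand of an
// s_setreg/s_getreg instruction.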
static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
  const MachineOperand *RegOp = TII->getNamedOperand(RegInstr,
                                                     AMDGPU::OpName::simm16);
  return RegOp->getImm() & AMDGPU::Hwreg::ID_MASK_;
}

ScheduleHazardRecognizer::HazardType
GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
  MachineInstr *MI = SU->getInstr();
  // If we are not in "HazardRecognizerMode" and therefore not being run from
  // the scheduler, track possible stalls from hazards but don't insert noops.
  auto HazardType = IsHazardRecognizerMode ? NoopHazard : Hazard;

  if (MI->isBundle())
    return NoHazard;

  if (SIInstrInfo::isSMRD(*MI) && checkSMRDHazards(MI) > 0)
    return HazardType;

  if (ST.hasNSAtoVMEMBug() && checkNSAtoVMEMHazard(MI) > 0)
    return HazardType;

  if (checkFPAtomicToDenormModeHazard(MI) > 0)
    return HazardType;

  if (ST.hasNoDataDepHazard())
    return NoHazard;

  // FIXME: Should flat be considered vmem?
  if ((SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI))
      && checkVMEMHazards(MI) > 0)
    return HazardType;

  if (SIInstrInfo::isVALU(*MI) && checkVALUHazards(MI) > 0)
    return HazardType;

  if (SIInstrInfo::isDPP(*MI) && checkDPPHazards(MI) > 0)
    return HazardType;

  if (isDivFMas(MI->getOpcode()) && checkDivFMasHazards(MI) > 0)
    return HazardType;

  if (isRWLane(MI->getOpcode()) && checkRWLaneHazards(MI) > 0)
    return HazardType;

  if ((SIInstrInfo::isVALU(*MI) || SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI) || SIInstrInfo::isDS(*MI) ||
       SIInstrInfo::isEXP(*MI)) && checkMAIVALUHazards(MI) > 0)
    return HazardType;

  if (isSGetReg(MI->getOpcode()) && checkGetRegHazards(MI) > 0)
    return HazardType;

  if (isSSetReg(MI->getOpcode()) && checkSetRegHazards(MI) > 0)
    return HazardType;

  if (isRFE(MI->getOpcode()) && checkRFEHazards(MI) > 0)
    return HazardType;

  if (((ST.hasReadM0MovRelInterpHazard() &&
        (TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode()) ||
         MI->getOpcode() == AMDGPU::DS_WRITE_ADDTID_B32 ||
         MI->getOpcode() == AMDGPU::DS_READ_ADDTID_B32)) ||
       (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI)) ||
       (ST.hasReadM0LdsDmaHazard() && isLdsDma(*MI)) ||
       (ST.hasReadM0LdsDirectHazard() &&
        MI->readsRegister(AMDGPU::LDS_DIRECT))) &&
      checkReadM0Hazards(MI) > 0)
    return HazardType;

  if (SIInstrInfo::isMAI(*MI) && checkMAIHazards(MI) > 0)
    return HazardType;

  if ((SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI) ||
       SIInstrInfo::isDS(*MI)) && checkMAILdStHazards(MI) > 0)
    return HazardType;

  if (MI->isInlineAsm() && checkInlineAsmHazards(MI) > 0)
    return HazardType;

  return NoHazard;
}

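// Emit \p Quantity wait states as s_nop instructions before \p MI. The s_nop
// immediate encodes (wait states - 1), so each s_nop covers at most 8 wait
// states.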
static void insertNoopsInBundle(MachineInstr *MI, const SIInstrInfo &TII,
                                unsigned Quantity) {
  while (Quantity > 0) {
    unsigned Arg = std::min(Quantity, 8u);
    Quantity -= Arg;
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII.get(AMDGPU::S_NOP))
        .addImm(Arg - 1);
  }
}

unsigned
GCNHazardRecognizer::getMFMAPipelineWaitStates(const MachineInstr &MI) const {
  const MCSchedClassDesc *SC = TSchedModel.resolveSchedClass(&MI);
  assert(TSchedModel.getWriteProcResBegin(SC) !=
         TSchedModel.getWriteProcResEnd(SC));
  return TSchedModel.getWriteProcResBegin(SC)->ReleaseAtCycle;
}

void GCNHazardRecognizer::processBundle() {
  MachineBasicBlock::instr_iterator MI = std::next(CurrCycleInstr->getIterator());
  MachineBasicBlock::instr_iterator E = CurrCycleInstr->getParent()->instr_end();
  // Check bundled MachineInstr's for hazards.
  for (; MI != E && MI->isInsideBundle(); ++MI) {
    CurrCycleInstr = &*MI;
    unsigned WaitStates = PreEmitNoopsCommon(CurrCycleInstr);

    if (IsHazardRecognizerMode) {
      fixHazards(CurrCycleInstr);

      insertNoopsInBundle(CurrCycleInstr, TII, WaitStates);
    }

    // It's unnecessary to track more than MaxLookAhead instructions. Since we
    // include the bundled MI directly after, only add a maximum of
    // (MaxLookAhead - 1) noops to EmittedInstrs.
    for (unsigned i = 0, e = std::min(WaitStates, MaxLookAhead - 1); i < e; ++i)
      EmittedInstrs.push_front(nullptr);

    EmittedInstrs.push_front(CurrCycleInstr);
    EmittedInstrs.resize(MaxLookAhead);
  }
  CurrCycleInstr = nullptr;
}

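// Process a single instruction in hazard recognizer mode: emit the no-ops
// required before \p MI (inside its bundle if it is bundled), then advance
// the cycle.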
void GCNHazardRecognizer::runOnInstruction(MachineInstr *MI) {
  assert(IsHazardRecognizerMode);

  unsigned NumPreNoops = PreEmitNoops(MI);
  EmitNoops(NumPreNoops);
  if (MI->isInsideBundle())
    insertNoopsInBundle(MI, TII, NumPreNoops);
  else
    TII.insertNoops(*MI->getParent(), MachineBasicBlock::iterator(MI),
                    NumPreNoops);
  EmitInstruction(MI);
  AdvanceCycle();
}

unsigned GCNHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
  IsHazardRecognizerMode = true;
  CurrCycleInstr = MI;
  unsigned W = PreEmitNoopsCommon(MI);
  fixHazards(MI);
  CurrCycleInstr = nullptr;
  return W;
}

unsigned GCNHazardRecognizer::PreEmitNoopsCommon(MachineInstr *MI) {
  if (MI->isBundle())
    return 0;

  int WaitStates = 0;

  if (SIInstrInfo::isSMRD(*MI))
    return std::max(WaitStates, checkSMRDHazards(MI));

  if (ST.hasNSAtoVMEMBug())
    WaitStates = std::max(WaitStates, checkNSAtoVMEMHazard(MI));

  WaitStates = std::max(WaitStates, checkFPAtomicToDenormModeHazard(MI));

  if (ST.hasNoDataDepHazard())
    return WaitStates;

  if (SIInstrInfo::isVMEM(*MI) || SIInstrInfo::isFLAT(*MI))
    WaitStates = std::max(WaitStates, checkVMEMHazards(MI));

  if (SIInstrInfo::isVALU(*MI))
    WaitStates = std::max(WaitStates, checkVALUHazards(MI));

  if (SIInstrInfo::isDPP(*MI))
    WaitStates = std::max(WaitStates, checkDPPHazards(MI));

  if (isDivFMas(MI->getOpcode()))
    WaitStates = std::max(WaitStates, checkDivFMasHazards(MI));

  if (isRWLane(MI->getOpcode()))
    WaitStates = std::max(WaitStates, checkRWLaneHazards(MI));

  if ((SIInstrInfo::isVALU(*MI) || SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI) || SIInstrInfo::isDS(*MI) ||
       SIInstrInfo::isEXP(*MI)) && checkMAIVALUHazards(MI) > 0)
    WaitStates = std::max(WaitStates, checkMAIVALUHazards(MI));

  if (MI->isInlineAsm())
    return std::max(WaitStates, checkInlineAsmHazards(MI));

  if (isSGetReg(MI->getOpcode()))
    return std::max(WaitStates, checkGetRegHazards(MI));

  if (isSSetReg(MI->getOpcode()))
    return std::max(WaitStates, checkSetRegHazards(MI));

  if (isRFE(MI->getOpcode()))
    return std::max(WaitStates, checkRFEHazards(MI));

  if ((ST.hasReadM0MovRelInterpHazard() &&
       (TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode()) ||
        MI->getOpcode() == AMDGPU::DS_WRITE_ADDTID_B32 ||
        MI->getOpcode() == AMDGPU::DS_READ_ADDTID_B32)) ||
      (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI)) ||
      (ST.hasReadM0LdsDmaHazard() && isLdsDma(*MI)) ||
      (ST.hasReadM0LdsDirectHazard() && MI->readsRegister(AMDGPU::LDS_DIRECT)))
    return std::max(WaitStates, checkReadM0Hazards(MI));

  if (SIInstrInfo::isMAI(*MI))
    return std::max(WaitStates, checkMAIHazards(MI));

  if (SIInstrInfo::isVMEM(*MI) ||
      SIInstrInfo::isFLAT(*MI) ||
      SIInstrInfo::isDS(*MI))
    return std::max(WaitStates, checkMAILdStHazards(MI));

  return WaitStates;
}

void GCNHazardRecognizer::EmitNoop() {
  EmittedInstrs.push_front(nullptr);
}

void GCNHazardRecognizer::AdvanceCycle() {
  // When the scheduler detects a stall, it will call AdvanceCycle() without
  // emitting any instructions.
  if (!CurrCycleInstr) {
    EmittedInstrs.push_front(nullptr);
    return;
  }

  if (CurrCycleInstr->isBundle()) {
    processBundle();
    return;
  }

  unsigned NumWaitStates = TII.getNumWaitStates(*CurrCycleInstr);
  if (!NumWaitStates) {
    CurrCycleInstr = nullptr;
    return;
  }

  // Keep track of emitted instructions
  EmittedInstrs.push_front(CurrCycleInstr);

  // Add a nullptr for each additional wait state after the first. Make sure
  // not to add more than getMaxLookAhead() items to the list, since we
  // truncate the list to that size right after this loop.
  for (unsigned i = 1, e = std::min(NumWaitStates, getMaxLookAhead());
       i < e; ++i) {
    EmittedInstrs.push_front(nullptr);
  }

  // getMaxLookahead() is the largest number of wait states we will ever need
  // to insert, so there is no point in keeping track of more than that many
  // wait states.
  EmittedInstrs.resize(getMaxLookAhead());

  CurrCycleInstr = nullptr;
}

void GCNHazardRecognizer::RecedeCycle() {
  llvm_unreachable("hazard recognizer does not support bottom-up scheduling.");
}

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

typedef enum { HazardFound, HazardExpired, NoHazardFound } HazardFnResult;

typedef function_ref<bool(const MachineInstr &, int WaitStates)> IsExpiredFn;
typedef function_ref<unsigned int(const MachineInstr &)> GetNumWaitStatesFn;

// Search for a hazard in a block and its predecessors.
template <typename StateT>
static bool
hasHazard(StateT State,
          function_ref<HazardFnResult(StateT &, const MachineInstr &)> IsHazard,
          function_ref<void(StateT &, const MachineInstr &)> UpdateState,
          const MachineBasicBlock *MBB,
          MachineBasicBlock::const_reverse_instr_iterator I,
          DenseSet<const MachineBasicBlock *> &Visited) {
  for (auto E = MBB->instr_rend(); I != E; ++I) {
    // No need to look at parent BUNDLE instructions.
    if (I->isBundle())
      continue;

    switch (IsHazard(State, *I)) {
    case HazardFound:
      return true;
    case HazardExpired:
      return false;
    default:
      // Continue search
      break;
    }

    if (I->isInlineAsm() || I->isMetaInstruction())
      continue;

    UpdateState(State, *I);
  }

  for (MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!Visited.insert(Pred).second)
      continue;

    if (hasHazard(State, IsHazard, UpdateState, Pred, Pred->instr_rbegin(),
                  Visited))
      return true;
  }

  return false;
}

// Returns the minimum number of wait states found since \p I, walking all
// predecessors. Only scans until \p IsExpired returns true.
// Can only be run in hazard recognizer mode.
static int getWaitStatesSince(
    GCNHazardRecognizer::IsHazardFn IsHazard, const MachineBasicBlock *MBB,
    MachineBasicBlock::const_reverse_instr_iterator I, int WaitStates,
    IsExpiredFn IsExpired, DenseSet<const MachineBasicBlock *> &Visited,
    GetNumWaitStatesFn GetNumWaitStates = SIInstrInfo::getNumWaitStates) {
  for (auto E = MBB->instr_rend(); I != E; ++I) {
    // Don't add WaitStates for parent BUNDLE instructions.
    if (I->isBundle())
      continue;

    if (IsHazard(*I))
      return WaitStates;

    if (I->isInlineAsm())
      continue;

    WaitStates += GetNumWaitStates(*I);

    if (IsExpired(*I, WaitStates))
      return std::numeric_limits<int>::max();
  }

  int MinWaitStates = std::numeric_limits<int>::max();
  for (MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!Visited.insert(Pred).second)
      continue;

    int W = getWaitStatesSince(IsHazard, Pred, Pred->instr_rbegin(), WaitStates,
                               IsExpired, Visited, GetNumWaitStates);

    MinWaitStates = std::min(MinWaitStates, W);
  }

  return MinWaitStates;
}

static int getWaitStatesSince(GCNHazardRecognizer::IsHazardFn IsHazard,
                              const MachineInstr *MI, IsExpiredFn IsExpired) {
  DenseSet<const MachineBasicBlock *> Visited;
  return getWaitStatesSince(IsHazard, MI->getParent(),
                            std::next(MI->getReverseIterator()),
                            0, IsExpired, Visited);
}

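// Returns the number of wait states since the last instruction matching
// \p IsHazard, or std::numeric_limits<int>::max() if no match is found
// within \p Limit. In hazard recognizer mode this walks the CFG backwards
// from CurrCycleInstr; otherwise it scans the EmittedInstrs queue.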
int GCNHazardRecognizer::getWaitStatesSince(IsHazardFn IsHazard, int Limit) {
  if (IsHazardRecognizerMode) {
    auto IsExpiredFn = [Limit](const MachineInstr &, int WaitStates) {
      return WaitStates >= Limit;
    };
    return ::getWaitStatesSince(IsHazard, CurrCycleInstr, IsExpiredFn);
  }

  int WaitStates = 0;
  for (MachineInstr *MI : EmittedInstrs) {
    if (MI) {
      if (IsHazard(*MI))
        return WaitStates;

      if (MI->isInlineAsm())
        continue;
    }
    ++WaitStates;

    if (WaitStates >= Limit)
      break;
  }
  return std::numeric_limits<int>::max();
}

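// Returns the number of wait states since the last write to \p Reg by an
// instruction matching \p IsHazardDef, searching at most \p Limit states.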
int GCNHazardRecognizer::getWaitStatesSinceDef(unsigned Reg,
                                               IsHazardFn IsHazardDef,
                                               int Limit) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  auto IsHazardFn = [IsHazardDef, TRI, Reg](const MachineInstr &MI) {
    return IsHazardDef(MI) && MI.modifiesRegister(Reg, TRI);
  };

  return getWaitStatesSince(IsHazardFn, Limit);
}

int GCNHazardRecognizer::getWaitStatesSinceSetReg(IsHazardFn IsHazard,
                                                  int Limit) {
  auto IsHazardFn = [IsHazard](const MachineInstr &MI) {
    return isSSetReg(MI.getOpcode()) && IsHazard(MI);
  };

  return getWaitStatesSince(IsHazardFn, Limit);
}

//===----------------------------------------------------------------------===//
// No-op Hazard Detection
//===----------------------------------------------------------------------===//

static void addRegUnits(const SIRegisterInfo &TRI, BitVector &BV,
                        MCRegister Reg) {
  for (MCRegUnit Unit : TRI.regunits(Reg))
    BV.set(Unit);
}

static void addRegsToSet(const SIRegisterInfo &TRI,
                         iterator_range<MachineInstr::const_mop_iterator> Ops,
                         BitVector &DefSet, BitVector &UseSet) {
  for (const MachineOperand &Op : Ops) {
    if (Op.isReg())
      addRegUnits(TRI, Op.isDef() ? DefSet : UseSet, Op.getReg().asMCReg());
  }
}

void GCNHazardRecognizer::addClauseInst(const MachineInstr &MI) {
  addRegsToSet(TRI, MI.operands(), ClauseDefs, ClauseUses);
}

static bool breaksSMEMSoftClause(MachineInstr *MI) {
  return !SIInstrInfo::isSMRD(*MI);
}

static bool breaksVMEMSoftClause(MachineInstr *MI) {
  return !SIInstrInfo::isVMEM(*MI) && !SIInstrInfo::isFLAT(*MI);
}

int GCNHazardRecognizer::checkSoftClauseHazards(MachineInstr *MEM) {
  // SMEM soft clauses are only present on VI+, and only matter if xnack is
  // enabled.
  if (!ST.isXNACKEnabled())
    return 0;

  bool IsSMRD = TII.isSMRD(*MEM);

  resetClause();

  // A soft-clause is any group of consecutive SMEM instructions. The
  // instructions in this group may return out of order and/or may be
  // replayed (i.e. the same instruction issued more than once).
  //
  // In order to handle these situations correctly we need to make sure that
  // when a clause has more than one instruction, no instruction in the clause
  // writes to a register that is read by another instruction in the clause
  // (including itself). If we encounter this situation, we need to break the
  // clause by inserting a non SMEM instruction.

  for (MachineInstr *MI : EmittedInstrs) {
    // When we hit a non-SMEM instruction then we have passed the start of the
    // clause and we can stop.
    if (!MI)
      break;

    if (IsSMRD ? breaksSMEMSoftClause(MI) : breaksVMEMSoftClause(MI))
      break;

    addClauseInst(*MI);
  }

  if (ClauseDefs.none())
    return 0;

  // We need to make sure not to put loads and stores in the same clause if they
  // use the same address. For now, just start a new clause whenever we see a
  // store.
  if (MEM->mayStore())
    return 1;

  addClauseInst(*MEM);

  // If the set of defs and uses intersect then we cannot add this instruction
  // to the clause, so we have a hazard.
  return ClauseDefs.anyCommon(ClauseUses) ? 1 : 0;
}

int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
  int WaitStatesNeeded = 0;

  WaitStatesNeeded = checkSoftClauseHazards(SMRD);

  // This SMRD hazard only affects SI.
  if (!ST.hasSMRDReadVALUDefHazard())
    return WaitStatesNeeded;

  // A read of an SGPR by SMRD instruction requires 4 wait states when the
  // SGPR was written by a VALU instruction.
  int SmrdSgprWaitStates = 4;
  auto IsHazardDefFn = [this](const MachineInstr &MI) {
    return TII.isVALU(MI);
  };
  auto IsBufferHazardDefFn = [this](const MachineInstr &MI) {
    return TII.isSALU(MI);
  };

  bool IsBufferSMRD = TII.isBufferSMRD(*SMRD);

  for (const MachineOperand &Use : SMRD->uses()) {
    if (!Use.isReg())
      continue;
    int WaitStatesNeededForUse =
        SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn,
                                                   SmrdSgprWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    // This fixes what appears to be undocumented hardware behavior in SI where
    // s_mov writing a descriptor and s_buffer_load_dword reading the descriptor
    // needs some number of nops in between. We don't know how many we need, but
    // let's use 4. This wasn't discovered before probably because the only
    // case when this happens is when we expand a 64-bit pointer into a full
    // descriptor and use s_buffer_load_dword instead of s_load_dword, which was
    // probably never encountered in the closed-source land.
    if (IsBufferSMRD) {
      int WaitStatesNeededForUse =
          SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(),
                                                     IsBufferHazardDefFn,
                                                     SmrdSgprWaitStates);
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
    }
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkVMEMHazards(MachineInstr* VMEM) {
  if (!ST.hasVMEMReadSGPRVALUDefHazard())
    return 0;

  int WaitStatesNeeded = checkSoftClauseHazards(VMEM);

  // A read of an SGPR by a VMEM instruction requires 5 wait states when the
  // SGPR was written by a VALU Instruction.
  const int VmemSgprWaitStates = 5;
  auto IsHazardDefFn = [this](const MachineInstr &MI) {
    return TII.isVALU(MI);
  };
  for (const MachineOperand &Use : VMEM->uses()) {
    if (!Use.isReg() || TRI.isVectorRegister(MF.getRegInfo(), Use.getReg()))
      continue;

    int WaitStatesNeededForUse =
        VmemSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn,
                                                   VmemSgprWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }
  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkDPPHazards(MachineInstr *DPP) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();

  // Check for DPP VGPR read after VALU VGPR write and EXEC write.
  int DppVgprWaitStates = 2;
  int DppExecWaitStates = 5;
  int WaitStatesNeeded = 0;
  auto IsHazardDefFn = [TII](const MachineInstr &MI) {
    return TII->isVALU(MI);
  };

  for (const MachineOperand &Use : DPP->uses()) {
    if (!Use.isReg() || !TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
      continue;
    int WaitStatesNeededForUse =
        DppVgprWaitStates - getWaitStatesSinceDef(
                                Use.getReg(),
                                [](const MachineInstr &) { return true; },
                                DppVgprWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  WaitStatesNeeded = std::max(
      WaitStatesNeeded,
      DppExecWaitStates - getWaitStatesSinceDef(AMDGPU::EXEC, IsHazardDefFn,
                                                DppExecWaitStates));

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkDivFMasHazards(MachineInstr *DivFMas) {
  const SIInstrInfo *TII = ST.getInstrInfo();

  // v_div_fmas requires 4 wait states after a write to vcc from a VALU
  // instruction.
  const int DivFMasWaitStates = 4;
  auto IsHazardDefFn = [TII](const MachineInstr &MI) {
    return TII->isVALU(MI);
  };
  int WaitStatesNeeded = getWaitStatesSinceDef(AMDGPU::VCC, IsHazardDefFn,
                                               DivFMasWaitStates);

  return DivFMasWaitStates - WaitStatesNeeded;
}

int GCNHazardRecognizer::checkGetRegHazards(MachineInstr *GetRegInstr) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned GetRegHWReg = getHWReg(TII, *GetRegInstr);

  const int GetRegWaitStates = 2;
  auto IsHazardFn = [TII, GetRegHWReg](const MachineInstr &MI) {
    return GetRegHWReg == getHWReg(TII, MI);
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, GetRegWaitStates);

  return GetRegWaitStates - WaitStatesNeeded;
}

int GCNHazardRecognizer::checkSetRegHazards(MachineInstr *SetRegInstr) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned HWReg = getHWReg(TII, *SetRegInstr);

  const int SetRegWaitStates = ST.getSetRegWaitStates();
  auto IsHazardFn = [TII, HWReg](const MachineInstr &MI) {
    return HWReg == getHWReg(TII, MI);
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, SetRegWaitStates);
  return SetRegWaitStates - WaitStatesNeeded;
}

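// If \p MI is a store that is subject to the hazard where a following VALU
// write can clobber its store data, returns the index of the store-data
// operand; otherwise returns -1.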
int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
  if (!MI.mayStore())
    return -1;

  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();

  int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
  int VDataRCID = -1;
  if (VDataIdx != -1)
    VDataRCID = Desc.operands()[VDataIdx].RegClass;

  if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) {
    // There is no hazard if the instruction does not use vector regs
    // (like wbinvl1)
    if (VDataIdx == -1)
      return -1;
    // For MUBUF/MTBUF instructions this hazard only exists if the
    // instruction is not using a register in the soffset field.
    const MachineOperand *SOffset =
        TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
    // If we have no soffset operand, then assume this field has been
    // hardcoded to zero.
    if (AMDGPU::getRegBitWidth(VDataRCID) > 64 &&
        (!SOffset || !SOffset->isReg()))
      return VDataIdx;
  }

  // MIMG instructions create a hazard if they don't use a 256-bit T# and
  // the store size is greater than 8 bytes and they have more than two bits
  // of their dmask set.
  // All our MIMG definitions use a 256-bit T#, so we can skip checking for them.
  if (TII->isMIMG(MI)) {
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
    assert(SRsrcIdx != -1 &&
           AMDGPU::getRegBitWidth(Desc.operands()[SRsrcIdx].RegClass) == 256);
    (void)SRsrcIdx;
  }

  if (TII->isFLAT(MI)) {
    int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
    if (AMDGPU::getRegBitWidth(Desc.operands()[DataIdx].RegClass) > 64)
      return DataIdx;
  }

  return -1;
}

int
GCNHazardRecognizer::checkVALUHazardsHelper(const MachineOperand &Def,
                                            const MachineRegisterInfo &MRI) {
  // Helper to check for the hazard where VMEM instructions that store more
  // than 8 bytes can have their store data overwritten by the next
  // instruction.
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  const int VALUWaitStates = ST.hasGFX940Insts() ? 2 : 1;
  int WaitStatesNeeded = 0;

  if (!TRI->isVectorRegister(MRI, Def.getReg()))
    return WaitStatesNeeded;
  Register Reg = Def.getReg();
  auto IsHazardFn = [this, Reg, TRI](const MachineInstr &MI) {
    int DataIdx = createsVALUHazard(MI);
    return DataIdx >= 0 &&
           TRI->regsOverlap(MI.getOperand(DataIdx).getReg(), Reg);
  };
  int WaitStatesNeededForDef =
      VALUWaitStates - getWaitStatesSince(IsHazardFn, VALUWaitStates);
  WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
  int WaitStatesNeeded = 0;

  if (ST.hasTransForwardingHazard() && !SIInstrInfo::isTRANS(*VALU)) {
    const int TransDefWaitstates = 1;

    auto IsTransDefFn = [this, VALU](const MachineInstr &MI) {
      if (!SIInstrInfo::isTRANS(MI))
        return false;
      const SIRegisterInfo *TRI = ST.getRegisterInfo();
      const SIInstrInfo *TII = ST.getInstrInfo();
      Register Def = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)->getReg();

      for (const MachineOperand &Use : VALU->explicit_uses()) {
        if (Use.isReg() && TRI->regsOverlap(Def, Use.getReg()))
          return true;
      }

      return false;
    };

    int WaitStatesNeededForDef =
        TransDefWaitstates -
        getWaitStatesSince(IsTransDefFn, TransDefWaitstates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
  }

  if (ST.hasDstSelForwardingHazard()) {
    const int Shift16DefWaitstates = 1;

    auto IsShift16BitDefFn = [this, VALU](const MachineInstr &MI) {
      if (!SIInstrInfo::isVALU(MI))
        return false;
      const SIInstrInfo *TII = ST.getInstrInfo();
      if (SIInstrInfo::isSDWA(MI)) {
        if (auto *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel))
          if (DstSel->getImm() == AMDGPU::SDWA::DWORD)
            return false;
      } else {
        if (!AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::op_sel) ||
            !(TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)
                  ->getImm() &
              SISrcMods::DST_OP_SEL))
          return false;
      }
      const SIRegisterInfo *TRI = ST.getRegisterInfo();
      if (auto *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
        Register Def = Dst->getReg();

        for (const MachineOperand &Use : VALU->explicit_uses()) {
          if (Use.isReg() && TRI->regsOverlap(Def, Use.getReg()))
            return true;
        }
      }

      return false;
    };

    int WaitStatesNeededForDef =
        Shift16DefWaitstates -
        getWaitStatesSince(IsShift16BitDefFn, Shift16DefWaitstates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
  }

  if (ST.hasVDecCoExecHazard()) {
    const int VALUWriteSGPRVALUReadWaitstates = 2;
    const int VALUWriteEXECRWLane = 4;
    const int VALUWriteVGPRReadlaneRead = 1;

    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    const MachineRegisterInfo &MRI = MF.getRegInfo();
    Register UseReg;
    auto IsVALUDefSGPRFn = [&UseReg, TRI](const MachineInstr &MI) {
      if (!SIInstrInfo::isVALU(MI))
        return false;
      return MI.modifiesRegister(UseReg, TRI);
    };

    for (const MachineOperand &Use : VALU->explicit_uses()) {
      if (!Use.isReg())
        continue;

      UseReg = Use.getReg();
      if (TRI->isSGPRReg(MRI, UseReg)) {
        int WaitStatesNeededForDef =
            VALUWriteSGPRVALUReadWaitstates -
            getWaitStatesSince(IsVALUDefSGPRFn,
                               VALUWriteSGPRVALUReadWaitstates);
        WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
      }
    }

    if (VALU->readsRegister(AMDGPU::VCC, TRI)) {
      UseReg = AMDGPU::VCC;
      int WaitStatesNeededForDef =
          VALUWriteSGPRVALUReadWaitstates -
          getWaitStatesSince(IsVALUDefSGPRFn, VALUWriteSGPRVALUReadWaitstates);
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
    }

    switch (VALU->getOpcode()) {
    case AMDGPU::V_READLANE_B32:
    case AMDGPU::V_READFIRSTLANE_B32: {
      MachineOperand *Src = TII.getNamedOperand(*VALU, AMDGPU::OpName::src0);
      UseReg = Src->getReg();
      int WaitStatesNeededForDef =
          VALUWriteVGPRReadlaneRead -
          getWaitStatesSince(IsVALUDefSGPRFn, VALUWriteVGPRReadlaneRead);
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
    }
      [[fallthrough]];
    case AMDGPU::V_WRITELANE_B32: {
      UseReg = AMDGPU::EXEC;
      int WaitStatesNeededForDef =
          VALUWriteEXECRWLane -
          getWaitStatesSince(IsVALUDefSGPRFn, VALUWriteEXECRWLane);
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
      break;
    }
    default:
      break;
    }
  }

  // This checks for the hazard where VMEM instructions that store more than
  // 8 bytes can have their store data overwritten by the next instruction.
  if (!ST.has12DWordStoreHazard())
    return WaitStatesNeeded;

  const MachineRegisterInfo &MRI = MF.getRegInfo();

  for (const MachineOperand &Def : VALU->defs()) {
    WaitStatesNeeded = std::max(WaitStatesNeeded, checkVALUHazardsHelper(Def, MRI));
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkInlineAsmHazards(MachineInstr *IA) {
  // This checks for hazards associated with inline asm statements.
  // Since inline asms can contain just about anything, we use this
  // to call/leverage other check*Hazard routines. Note that
  // this function doesn't attempt to address all possible inline asm
  // hazards (good luck), but is a collection of what has been
  // problematic thus far.

  // see checkVALUHazards()
  if (!ST.has12DWordStoreHazard())
    return 0;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  int WaitStatesNeeded = 0;

  for (const MachineOperand &Op :
       llvm::drop_begin(IA->operands(), InlineAsm::MIOp_FirstOperand)) {
    if (Op.isReg() && Op.isDef()) {
      WaitStatesNeeded =
          std::max(WaitStatesNeeded, checkVALUHazardsHelper(Op, MRI));
    }
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  const MachineOperand *LaneSelectOp =
      TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1);

  if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg()))
    return 0;

  Register LaneSelectReg = LaneSelectOp->getReg();
  auto IsHazardFn = [TII](const MachineInstr &MI) { return TII->isVALU(MI); };

  const int RWLaneWaitStates = 4;
  int WaitStatesSince = getWaitStatesSinceDef(LaneSelectReg, IsHazardFn,
                                              RWLaneWaitStates);
  return RWLaneWaitStates - WaitStatesSince;
}

int GCNHazardRecognizer::checkRFEHazards(MachineInstr *RFE) {
  if (!ST.hasRFEHazards())
    return 0;

  const SIInstrInfo *TII = ST.getInstrInfo();

  const int RFEWaitStates = 1;

  auto IsHazardFn = [TII](const MachineInstr &MI) {
    return getHWReg(TII, MI) == AMDGPU::Hwreg::ID_TRAPSTS;
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, RFEWaitStates);
  return RFEWaitStates - WaitStatesNeeded;
}

int GCNHazardRecognizer::checkReadM0Hazards(MachineInstr *MI) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const int ReadM0WaitStates = 1;
  auto IsHazardFn = [TII](const MachineInstr &MI) { return TII->isSALU(MI); };
  return ReadM0WaitStates -
         getWaitStatesSinceDef(AMDGPU::M0, IsHazardFn, ReadM0WaitStates);
}

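// Run all hazard fixups on \p MI. These may insert wait instructions or
// mutate operands, and are only invoked in hazard recognizer mode.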
void GCNHazardRecognizer::fixHazards(MachineInstr *MI) {
  fixVMEMtoScalarWriteHazards(MI);
  fixVcmpxPermlaneHazards(MI);
  fixSMEMtoVectorWriteHazards(MI);
  fixVcmpxExecWARHazard(MI);
  fixLdsBranchVmemWARHazard(MI);
  if (ST.hasLdsDirect()) {
    fixLdsDirectVALUHazard(MI);
    fixLdsDirectVMEMHazard(MI);
  }
  fixVALUPartialForwardingHazard(MI);
  fixVALUTransUseHazard(MI);
  fixWMMAHazards(MI);
  fixShift64HighRegBug(MI);
  fixVALUMaskWriteHazard(MI);
}

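// A v_permlane* following a VALU compare that writes EXEC requires an
// intervening VALU. v_nop would be discarded by SQ, so insert a v_mov_b32 of
// the permlane's src0 onto itself instead.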
bool GCNHazardRecognizer::fixVcmpxPermlaneHazards(MachineInstr *MI) {
  if (!ST.hasVcmpxPermlaneHazard() || !isPermlane(*MI))
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  auto IsHazardFn = [TII, TRI](const MachineInstr &MI) {
    return (TII->isVOPC(MI) ||
            ((TII->isVOP3(MI) || TII->isSDWA(MI)) && MI.isCompare())) &&
           MI.modifiesRegister(AMDGPU::EXEC, TRI);
  };

  auto IsExpiredFn = [](const MachineInstr &MI, int) {
    unsigned Opc = MI.getOpcode();
    return SIInstrInfo::isVALU(MI) && Opc != AMDGPU::V_NOP_e32 &&
           Opc != AMDGPU::V_NOP_e64 && Opc != AMDGPU::V_NOP_sdwa;
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  // V_NOP will be discarded by SQ.
  // Use V_MOV_B32 v?, v?. Register must be alive so use src0 of V_PERMLANE*
  // which is always a VGPR and available.
  auto *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
  Register Reg = Src0->getReg();
  bool IsUndef = Src0->isUndef();
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::V_MOV_B32_e32))
      .addReg(Reg, RegState::Define | (IsUndef ? RegState::Dead : 0))
      .addReg(Reg, IsUndef ? RegState::Undef : RegState::Kill);

  return true;
}

bool GCNHazardRecognizer::fixVMEMtoScalarWriteHazards(MachineInstr *MI) {
  if (!ST.hasVMEMtoScalarWriteHazard())
    return false;
  assert(!ST.hasExtendedWaitCounts());

  if (!SIInstrInfo::isSALU(*MI) && !SIInstrInfo::isSMRD(*MI))
    return false;

  if (MI->getNumDefs() == 0)
    return false;

  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  auto IsHazardFn = [TRI, MI](const MachineInstr &I) {
    if (!SIInstrInfo::isVMEM(I) && !SIInstrInfo::isDS(I) &&
        !SIInstrInfo::isFLAT(I))
      return false;

    for (const MachineOperand &Def : MI->defs()) {
      const MachineOperand *Op =
          I.findRegisterUseOperand(Def.getReg(), false, TRI);
      if (!Op)
        continue;
      return true;
    }
    return false;
  };

  auto IsExpiredFn = [](const MachineInstr &MI, int) {
    return SIInstrInfo::isVALU(MI) ||
           (MI.getOpcode() == AMDGPU::S_WAITCNT &&
            !MI.getOperand(0).getImm()) ||
           (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
            AMDGPU::DepCtr::decodeFieldVmVsrc(MI.getOperand(0).getImm()) == 0);
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(AMDGPU::DepCtr::encodeFieldVmVsrc(0));
  return true;
}

bool GCNHazardRecognizer::fixSMEMtoVectorWriteHazards(MachineInstr *MI) {
  if (!ST.hasSMEMtoVectorWriteHazard())
    return false;
  assert(!ST.hasExtendedWaitCounts());

  if (!SIInstrInfo::isVALU(*MI))
    return false;

  unsigned SDSTName;
  switch (MI->getOpcode()) {
  case AMDGPU::V_READLANE_B32:
  case AMDGPU::V_READFIRSTLANE_B32:
    SDSTName = AMDGPU::OpName::vdst;
    break;
  default:
    SDSTName = AMDGPU::OpName::sdst;
    break;
  }

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const AMDGPU::IsaVersion IV = AMDGPU::getIsaVersion(ST.getCPU());
  const MachineOperand *SDST = TII->getNamedOperand(*MI, SDSTName);
  if (!SDST) {
    for (const auto &MO : MI->implicit_operands()) {
      if (MO.isDef() && TRI->isSGPRClass(TRI->getPhysRegBaseClass(MO.getReg()))) {
        SDST = &MO;
        break;
      }
    }
  }

  if (!SDST)
    return false;

  const Register SDSTReg = SDST->getReg();
  auto IsHazardFn = [SDSTReg, TRI](const MachineInstr &I) {
    return SIInstrInfo::isSMRD(I) && I.readsRegister(SDSTReg, TRI);
  };

  auto IsExpiredFn = [TII, IV](const MachineInstr &MI, int) {
    if (TII->isSALU(MI)) {
      switch (MI.getOpcode()) {
      case AMDGPU::S_SETVSKIP:
      case AMDGPU::S_VERSION:
      case AMDGPU::S_WAITCNT_VSCNT:
      case AMDGPU::S_WAITCNT_VMCNT:
      case AMDGPU::S_WAITCNT_EXPCNT:
        // These instructions cannot mitigate the hazard.
        return false;
      case AMDGPU::S_WAITCNT_LGKMCNT:
        // Reducing lgkmcnt count to 0 always mitigates the hazard.
        return (MI.getOperand(1).getImm() == 0) &&
               (MI.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
      case AMDGPU::S_WAITCNT: {
        const int64_t Imm = MI.getOperand(0).getImm();
        AMDGPU::Waitcnt Decoded = AMDGPU::decodeWaitcnt(IV, Imm);
        // DsCnt corresponds to LGKMCnt here.
        return (Decoded.DsCnt == 0);
      }
      default:
        // SOPP instructions cannot mitigate the hazard.
        if (TII->isSOPP(MI))
          return false;
        // At this point the SALU can be assumed to mitigate the hazard
        // because either:
        // (a) it is independent of the at risk SMEM (breaking chain),
        // or
        // (b) it is dependent on the SMEM, in which case an appropriate
        //     s_waitcnt lgkmcnt _must_ exist between it and the at risk
        //     SMEM instruction.
        return true;
      }
    }
    return false;
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_MOV_B32), AMDGPU::SGPR_NULL)
      .addImm(0);
  return true;
}

bool GCNHazardRecognizer::fixVcmpxExecWARHazard(MachineInstr *MI) {
  if (!ST.hasVcmpxExecWARHazard())
    return false;
  assert(!ST.hasExtendedWaitCounts());

  if (!SIInstrInfo::isVALU(*MI))
    return false;

  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  if (!MI->modifiesRegister(AMDGPU::EXEC, TRI))
    return false;

  auto IsHazardFn = [TRI](const MachineInstr &I) {
    if (SIInstrInfo::isVALU(I))
      return false;
    return I.readsRegister(AMDGPU::EXEC, TRI);
  };

  const SIInstrInfo *TII = ST.getInstrInfo();
  auto IsExpiredFn = [TII, TRI](const MachineInstr &MI, int) {
    if (SIInstrInfo::isVALU(MI)) {
      if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst))
        return true;
      for (auto MO : MI.implicit_operands())
        if (MO.isDef() && TRI->isSGPRClass(TRI->getPhysRegBaseClass(MO.getReg())))
          return true;
    }
    if (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
        AMDGPU::DepCtr::decodeFieldSaSdst(MI.getOperand(0).getImm()) == 0)
      return true;
    return false;
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(AMDGPU::DepCtr::encodeFieldSaSdst(0));
  return true;
}

static bool shouldRunLdsBranchVmemWARHazardFixup(const MachineFunction &MF,
                                                 const GCNSubtarget &ST) {
  if (!ST.hasLdsBranchVmemWARHazard())
    return false;

  // Check if the necessary condition for the hazard is met: both LDS and VMEM
  // instructions need to appear in the same function.
  bool HasLds = false;
  bool HasVmem = false;
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      HasLds |= SIInstrInfo::isDS(MI);
      HasVmem |=
          SIInstrInfo::isVMEM(MI) || SIInstrInfo::isSegmentSpecificFLAT(MI);
      if (HasLds && HasVmem)
        return true;
    }
  }
  return false;
}

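// Returns true if \p I is an s_waitcnt_vscnt with a null SGPR operand and a
// zero immediate, i.e. a wait for all outstanding VMEM stores to complete.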
static bool isStoreCountWaitZero(const MachineInstr &I) {
  return I.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
         I.getOperand(0).getReg() == AMDGPU::SGPR_NULL &&
         !I.getOperand(1).getImm();
}

bool GCNHazardRecognizer::fixLdsBranchVmemWARHazard(MachineInstr *MI) {
  if (!RunLdsBranchVmemWARHazardFixup)
    return false;

  assert(ST.hasLdsBranchVmemWARHazard());
  assert(!ST.hasExtendedWaitCounts());

  auto IsHazardInst = [](const MachineInstr &MI) {
    if (SIInstrInfo::isDS(MI))
      return 1;
    if (SIInstrInfo::isVMEM(MI) || SIInstrInfo::isSegmentSpecificFLAT(MI))
      return 2;
    return 0;
  };

  auto InstType = IsHazardInst(*MI);
  if (!InstType)
    return false;

  auto IsExpiredFn = [&IsHazardInst](const MachineInstr &I, int) {
    return IsHazardInst(I) || isStoreCountWaitZero(I);
  };

  auto IsHazardFn = [InstType, &IsHazardInst](const MachineInstr &I) {
    if (!I.isBranch())
      return false;

    auto IsHazardFn = [InstType, IsHazardInst](const MachineInstr &I) {
      auto InstType2 = IsHazardInst(I);
      return InstType2 && InstType != InstType2;
    };

    auto IsExpiredFn = [InstType, &IsHazardInst](const MachineInstr &I, int) {
      auto InstType2 = IsHazardInst(I);
      if (InstType == InstType2)
        return true;

      return isStoreCountWaitZero(I);
    };

    return ::getWaitStatesSince(IsHazardFn, &I, IsExpiredFn) !=
           std::numeric_limits<int>::max();
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_WAITCNT_VSCNT))
      .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
      .addImm(0);

  return true;
}

bool GCNHazardRecognizer::fixLdsDirectVALUHazard(MachineInstr *MI) {
  if (!SIInstrInfo::isLDSDIR(*MI))
    return false;

  const int NoHazardWaitStates = 15;
  const MachineOperand *VDST = TII.getNamedOperand(*MI, AMDGPU::OpName::vdst);
  const Register VDSTReg = VDST->getReg();

  bool VisitedTrans = false;
  auto IsHazardFn = [this, VDSTReg, &VisitedTrans](const MachineInstr &I) {
    if (!SIInstrInfo::isVALU(I))
      return false;
    VisitedTrans = VisitedTrans || SIInstrInfo::isTRANS(I);
    // Cover both WAR and WAW
    return I.readsRegister(VDSTReg, &TRI) || I.modifiesRegister(VDSTReg, &TRI);
  };
  auto IsExpiredFn = [&](const MachineInstr &I, int WaitStates) {
    if (WaitStates >= NoHazardWaitStates)
      return true;
    // Instructions which cause va_vdst==0 expire the hazard.
    return SIInstrInfo::isVMEM(I) || SIInstrInfo::isFLAT(I) ||
           SIInstrInfo::isDS(I) || SIInstrInfo::isEXP(I);
  };
  auto GetWaitStatesFn = [](const MachineInstr &MI) {
    return SIInstrInfo::isVALU(MI) ? 1 : 0;
  };

  DenseSet<const MachineBasicBlock *> Visited;
  auto Count = ::getWaitStatesSince(IsHazardFn, MI->getParent(),
                                    std::next(MI->getReverseIterator()), 0,
                                    IsExpiredFn, Visited, GetWaitStatesFn);

  // Transcendentals can execute in parallel to other VALUs.
  // This makes va_vdst count unusable with a mixture of VALU and TRANS.
  if (VisitedTrans)
    Count = 0;

  MachineOperand *WaitVdstOp =
      TII.getNamedOperand(*MI, AMDGPU::OpName::waitvdst);
  WaitVdstOp->setImm(std::min(Count, NoHazardWaitStates));

  return true;
}

bool GCNHazardRecognizer::fixLdsDirectVMEMHazard(MachineInstr *MI) {
  if (!SIInstrInfo::isLDSDIR(*MI))
    return false;

  const MachineOperand *VDST = TII.getNamedOperand(*MI, AMDGPU::OpName::vdst);
  const Register VDSTReg = VDST->getReg();

  auto IsHazardFn = [this, VDSTReg](const MachineInstr &I) {
    if (!SIInstrInfo::isVMEM(I) && !SIInstrInfo::isFLAT(I) &&
        !SIInstrInfo::isDS(I))
      return false;
    return I.readsRegister(VDSTReg, &TRI) || I.modifiesRegister(VDSTReg, &TRI);
  };
  bool LdsdirCanWait = ST.hasLdsWaitVMSRC();
  // TODO: On GFX12 the hazard should expire on S_WAIT_LOADCNT/SAMPLECNT/BVHCNT
  // according to the type of VMEM instruction.
  auto IsExpiredFn = [this, LdsdirCanWait](const MachineInstr &I, int) {
    return SIInstrInfo::isVALU(I) || SIInstrInfo::isEXP(I) ||
           (I.getOpcode() == AMDGPU::S_WAITCNT && !I.getOperand(0).getImm()) ||
           (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
            AMDGPU::DepCtr::decodeFieldVmVsrc(I.getOperand(0).getImm()) == 0) ||
           (LdsdirCanWait && SIInstrInfo::isLDSDIR(I) &&
            !TII.getNamedOperand(I, AMDGPU::OpName::waitvsrc)->getImm());
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  if (LdsdirCanWait) {
    TII.getNamedOperand(*MI, AMDGPU::OpName::waitvsrc)->setImm(0);
  } else {
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
            TII.get(AMDGPU::S_WAITCNT_DEPCTR))
        .addImm(AMDGPU::DepCtr::encodeFieldVmVsrc(0));
  }

  return true;
}

bool GCNHazardRecognizer::fixVALUPartialForwardingHazard(MachineInstr *MI) {
  if (!ST.hasVALUPartialForwardingHazard())
    return false;
  assert(!ST.hasExtendedWaitCounts());

  if (!ST.isWave64() || !SIInstrInfo::isVALU(*MI))
    return false;

  SmallSetVector<Register, 4> SrcVGPRs;

  for (const MachineOperand &Use : MI->explicit_uses()) {
    if (Use.isReg() && TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
      SrcVGPRs.insert(Use.getReg());
  }

  // Only applies with >= 2 unique VGPR sources
  if (SrcVGPRs.size() <= 1)
    return false;

  // Look for the following pattern:
  //   Va <- VALU [PreExecPos]
  //   intv1
  //   Exec <- SALU [ExecPos]
  //   intv2
  //   Vb <- VALU [PostExecPos]
  //   intv3
  //   MI Va, Vb (WaitState = 0)
  //
  // Where:
  //   intv1 + intv2 <= 2 VALUs
  //   intv3 <= 4 VALUs
  //
  // If found, insert an appropriate S_WAITCNT_DEPCTR before MI.

  const int Intv1plus2MaxVALUs = 2;
  const int Intv3MaxVALUs = 4;
  const int IntvMaxVALUs = 6;
  const int NoHazardVALUWaitStates = IntvMaxVALUs + 2;

  struct StateType {
    SmallDenseMap<Register, int, 4> DefPos;
    int ExecPos = std::numeric_limits<int>::max();
    int VALUs = 0;
  };

  StateType State;

1536 // This overloads expiry testing with all the hazard detection
1537 auto IsHazardFn = [&, this](StateType &State, const MachineInstr &I) {
1538 // Too many VALU states have passed
1539 if (State.VALUs > NoHazardVALUWaitStates)
1540 return HazardExpired;
1541
1542 // Instructions which cause va_vdst==0 expire hazard
1543 if (SIInstrInfo::isVMEM(I) || SIInstrInfo::isFLAT(I) ||
1544 SIInstrInfo::isDS(I) || SIInstrInfo::isEXP(I) ||
1545 (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
1546 AMDGPU::DepCtr::decodeFieldVaVdst(I.getOperand(0).getImm()) == 0))
1547 return HazardExpired;
1548
1549 // Track registers writes
1550 bool Changed = false;
1551 if (SIInstrInfo::isVALU(I)) {
1552 for (Register Src : SrcVGPRs) {
1553 if (!State.DefPos.count(Src) && I.modifiesRegister(Src, &TRI)) {
1554 State.DefPos[Src] = State.VALUs;
1555 Changed = true;
1556 }
1557 }
1558 } else if (SIInstrInfo::isSALU(I)) {
1559 if (State.ExecPos == std::numeric_limits<int>::max()) {
1560 if (!State.DefPos.empty() && I.modifiesRegister(AMDGPU::EXEC, &TRI)) {
1561 State.ExecPos = State.VALUs;
1562 Changed = true;
1563 }
1564 }
1565 }
1566
1567 // Early expiration: too many VALUs in intv3
1568 if (State.VALUs > Intv3MaxVALUs && State.DefPos.empty())
1569 return HazardExpired;
1570
1571 // Only evaluate state if something changed
1572 if (!Changed)
1573 return NoHazardFound;
1574
1575 // Determine positions of VALUs pre/post exec change
1576 if (State.ExecPos == std::numeric_limits<int>::max())
1577 return NoHazardFound;
1578
1579 int PreExecPos = std::numeric_limits<int>::max();
1580 int PostExecPos = std::numeric_limits<int>::max();
1581
1582 for (auto Entry : State.DefPos) {
1583 int DefVALUs = Entry.second;
1584 if (DefVALUs != std::numeric_limits<int>::max()) {
1585 if (DefVALUs >= State.ExecPos)
1586 PreExecPos = std::min(PreExecPos, DefVALUs);
1587 else if (DefVALUs < State.ExecPos)
1588 PostExecPos = std::min(PostExecPos, DefVALUs);
1589 }
1590 }
1591
1592 // Need a VALUs post exec change
1593 if (PostExecPos == std::numeric_limits<int>::max())
1594 return NoHazardFound;
1595
1596 // Too many VALUs in intv3?
1597 int Intv3VALUs = PostExecPos;
1598 if (Intv3VALUs > Intv3MaxVALUs)
1599 return HazardExpired;
1600
1601 // Too many VALUs in intv2?
1602 int Intv2VALUs = (State.ExecPos - PostExecPos) - 1;
1603 if (Intv2VALUs > Intv1plus2MaxVALUs)
1604 return HazardExpired;
1605
1606 // Need a VALUs pre exec change
1607 if (PreExecPos == std::numeric_limits<int>::max())
1608 return NoHazardFound;

    // Too many VALUs in intv1?
    int Intv1VALUs = PreExecPos - State.ExecPos;
    if (Intv1VALUs > Intv1plus2MaxVALUs)
      return HazardExpired;

    // Too many VALUs in intv1 + intv2?
    if (Intv1VALUs + Intv2VALUs > Intv1plus2MaxVALUs)
      return HazardExpired;

    return HazardFound;
  };
  auto UpdateStateFn = [](StateType &State, const MachineInstr &MI) {
    if (SIInstrInfo::isVALU(MI))
      State.VALUs += 1;
  };

  DenseSet<const MachineBasicBlock *> Visited;
  if (!hasHazard<StateType>(State, IsHazardFn, UpdateStateFn, MI->getParent(),
                            std::next(MI->getReverseIterator()), Visited))
    return false;

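  // Under the depctr field layout assumed here, 0x0fff leaves va_vdst
  // (bits 15:12) at zero while keeping the remaining fields at their no-wait
  // values, i.e. it waits for outstanding VALU writes to VGPRs to complete.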
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII.get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(0x0fff);

  return true;
}

bool GCNHazardRecognizer::fixVALUTransUseHazard(MachineInstr *MI) {
  if (!ST.hasVALUTransUseHazard())
    return false;
  assert(!ST.hasExtendedWaitCounts());

  if (!SIInstrInfo::isVALU(*MI))
    return false;

  SmallSet<Register, 4> SrcVGPRs;

  for (const MachineOperand &Use : MI->explicit_uses()) {
    if (Use.isReg() && TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
      SrcVGPRs.insert(Use.getReg());
  }

  // Look for the following pattern:
  //   Va <- TRANS VALU
  //   intv
  //   MI Va (WaitState = 0)
  //
  // Where:
  //   intv <= 5 VALUs / 1 TRANS
  //
  // If found, insert an appropriate S_WAITCNT_DEPCTR before MI.
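  //
  // As an illustrative (hypothetical) sequence, for exposition only:
  //   v_rcp_f32 v0, v1          ; Va def by a TRANS op
  //   v_add_f32 v2, v0, v3      ; MI reading Va too soon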

  const int IntvMaxVALUs = 5;
  const int IntvMaxTRANS = 1;

  struct StateType {
    int VALUs = 0;
    int TRANS = 0;
  };

  StateType State;

  // This lambda folds hazard-expiry testing into the hazard detection itself.
  auto IsHazardFn = [&, this](StateType &State, const MachineInstr &I) {
    // Too many VALU states have passed
    if (State.VALUs > IntvMaxVALUs || State.TRANS > IntvMaxTRANS)
      return HazardExpired;

    // Instructions which cause va_vdst==0 expire hazard
    if (SIInstrInfo::isVMEM(I) || SIInstrInfo::isFLAT(I) ||
        SIInstrInfo::isDS(I) || SIInstrInfo::isEXP(I) ||
        (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
         I.getOperand(0).getImm() == 0x0fff))
      return HazardExpired;

    // Track register writes
    if (SIInstrInfo::isTRANS(I)) {
      for (Register Src : SrcVGPRs) {
        if (I.modifiesRegister(Src, &TRI)) {
          return HazardFound;
        }
      }
    }

    return NoHazardFound;
  };
  auto UpdateStateFn = [](StateType &State, const MachineInstr &MI) {
    if (SIInstrInfo::isVALU(MI))
      State.VALUs += 1;
    if (SIInstrInfo::isTRANS(MI))
      State.TRANS += 1;
  };

  DenseSet<const MachineBasicBlock *> Visited;
  if (!hasHazard<StateType>(State, IsHazardFn, UpdateStateFn, MI->getParent(),
                            std::next(MI->getReverseIterator()), Visited))
    return false;

  // Hazard is observed - insert a wait on the va_vdst counter to ensure the
  // hazard is avoided.
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII.get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(AMDGPU::DepCtr::encodeFieldVaVdst(0));

  return true;
}

bool GCNHazardRecognizer::fixWMMAHazards(MachineInstr *MI) {
  if (!SIInstrInfo::isWMMA(*MI) && !SIInstrInfo::isSWMMAC(*MI))
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  auto IsHazardFn = [MI, TII, TRI, this](const MachineInstr &I) {
    if (!SIInstrInfo::isWMMA(I) && !SIInstrInfo::isSWMMAC(I))
      return false;

    // Src0 or Src1 of the current wmma instruction overlaps with the dest of
    // the previous wmma.
    const Register CurSrc0Reg =
        TII->getNamedOperand(*MI, AMDGPU::OpName::src0)->getReg();
    const Register CurSrc1Reg =
        TII->getNamedOperand(*MI, AMDGPU::OpName::src1)->getReg();

    const Register PrevDstReg =
        TII->getNamedOperand(I, AMDGPU::OpName::vdst)->getReg();

    if (TRI->regsOverlap(PrevDstReg, CurSrc0Reg) ||
        TRI->regsOverlap(PrevDstReg, CurSrc1Reg)) {
      return true;
    }

    // Src2 of the current wmma instruction overlaps with the dest of the
    // previous wmma.
    const MachineOperand *Src2 =
        TII->getNamedOperand(*MI, AMDGPU::OpName::src2);
    const Register CurSrc2Reg = Src2->isReg() ? Src2->getReg() : Register();

    if (CurSrc2Reg != AMDGPU::NoRegister &&
        TRI->regsOverlap(PrevDstReg, CurSrc2Reg)) {

      const MachineOperand *Src2Mods =
          TII->getNamedOperand(*MI, AMDGPU::OpName::src2_modifiers);
      const bool NoSrc2Mods =
          !Src2Mods ||
          (Src2Mods->getImm() & (SISrcMods::NEG | SISrcMods::NEG_HI)) == 0;
      // Exception: there is no hazard if the wmma instructions are of the same
      // type and there is no input modifier on src2 of the current instruction.
      return !(NoSrc2Mods && (TII->pseudoToMCOpcode(I.getOpcode()) ==
                              TII->pseudoToMCOpcode(MI->getOpcode())));
    }

    // GFX12+ allows overlap of matrix C with PrevDstReg (hardware will stall)
    // but Index can't overlap with PrevDstReg.
    if (AMDGPU::isGFX12Plus(ST)) {
      if (SIInstrInfo::isSWMMAC(*MI)) {
        const Register CurIndex =
            TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->getReg();
        if (TRI->regsOverlap(PrevDstReg, CurIndex))
          return true;
      }
      return false;
    }

    return false;
  };

  auto IsExpiredFn = [](const MachineInstr &I, int) {
    return SIInstrInfo::isVALU(I);
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AMDGPU::V_NOP_e32));

  return true;
}

bool GCNHazardRecognizer::fixShift64HighRegBug(MachineInstr *MI) {
  if (!ST.hasShift64HighRegBug())
    return false;
  assert(!ST.hasExtendedWaitCounts());

  switch (MI->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_LSHLREV_B64_e64:
  case AMDGPU::V_LSHRREV_B64_e64:
  case AMDGPU::V_ASHRREV_I64_e64:
    break;
  }

  MachineOperand *Amt = TII.getNamedOperand(*MI, AMDGPU::OpName::src0);
  if (!Amt->isReg())
    return false;

  Register AmtReg = Amt->getReg();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  // Check if this is the last VGPR in the allocation block.
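  // (Given the 8-register granularity in the check below, that means v7,
  // v15, ..., v255.)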
  if (!TRI.isVGPR(MRI, AmtReg) || ((AmtReg - AMDGPU::VGPR0) & 7) != 7)
    return false;

  if (AmtReg != AMDGPU::VGPR255 && MRI.isPhysRegUsed(AmtReg + 1))
    return false;

  MachineOperand *Src1 = TII.getNamedOperand(*MI, AMDGPU::OpName::src1);
  bool OverlappedSrc = Src1->isReg() && TRI.regsOverlap(Src1->getReg(), AmtReg);
  bool OverlappedDst = MI->modifiesRegister(AmtReg, &TRI);
  bool Overlapped = OverlappedSrc || OverlappedDst;

  assert(!OverlappedDst || !OverlappedSrc ||
         Src1->getReg() == MI->getOperand(0).getReg());
  assert(ST.needsAlignedVGPRs());
  static_assert(AMDGPU::VGPR0 + 1 == AMDGPU::VGPR1);

  Register NewReg;
  for (MCRegister Reg : Overlapped ? AMDGPU::VReg_64_Align2RegClass
                                   : AMDGPU::VGPR_32RegClass) {
    if (!MI->modifiesRegister(Reg, &TRI) && !MI->readsRegister(Reg, &TRI)) {
      NewReg = Reg;
      break;
    }
  }

  Register NewAmt = Overlapped ? (Register)TRI.getSubReg(NewReg, AMDGPU::sub1)
                               : NewReg;
  Register NewAmtLo;

  if (Overlapped)
    NewAmtLo = TRI.getSubReg(NewReg, AMDGPU::sub0);

  DebugLoc DL = MI->getDebugLoc();
  MachineBasicBlock *MBB = MI->getParent();
  // Insert a full wait count because the found register might be pending a
  // wait.
  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_WAITCNT))
      .addImm(0);

  // Insert V_SWAP_B32 instruction(s) and run hazard recognizer on them.
  if (Overlapped)
    runOnInstruction(
        BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_SWAP_B32), NewAmtLo)
            .addDef(AmtReg - 1)
            .addReg(AmtReg - 1, RegState::Undef)
            .addReg(NewAmtLo, RegState::Undef));
  runOnInstruction(BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_SWAP_B32), NewAmt)
                       .addDef(AmtReg)
                       .addReg(AmtReg, RegState::Undef)
                       .addReg(NewAmt, RegState::Undef));

  // Instructions emitted after the current instruction will be processed by
  // the parent loop of the hazard recognizer in a natural way.
  BuildMI(*MBB, std::next(MI->getIterator()), DL, TII.get(AMDGPU::V_SWAP_B32),
          AmtReg)
      .addDef(NewAmt)
      .addReg(NewAmt)
      .addReg(AmtReg);
  if (Overlapped)
    BuildMI(*MBB, std::next(MI->getIterator()), DL, TII.get(AMDGPU::V_SWAP_B32),
            AmtReg - 1)
        .addDef(NewAmtLo)
        .addReg(NewAmtLo)
        .addReg(AmtReg - 1);

  // Re-running the hazard recognizer on the modified instruction is not
  // necessary: the inserted V_SWAP_B32 instructions have already both read and
  // written the new registers, so hazards related to these registers have
  // already been handled.
  Amt->setReg(NewAmt);
  Amt->setIsKill(false);
  // We do not update liveness, so the verifier may see it as undef.
  Amt->setIsUndef();
  if (OverlappedDst)
    MI->getOperand(0).setReg(NewReg);
  if (OverlappedSrc) {
    Src1->setReg(NewReg);
    Src1->setIsKill(false);
    Src1->setIsUndef();
  }

  return true;
}

int GCNHazardRecognizer::checkNSAtoVMEMHazard(MachineInstr *MI) {
  int NSAtoVMEMWaitStates = 1;

  if (!ST.hasNSAtoVMEMBug())
    return 0;

  if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isMTBUF(*MI))
    return 0;

  const SIInstrInfo *TII = ST.getInstrInfo();
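  // The hazard only applies to buffer accesses whose immediate offset has
  // bit 1 or bit 2 set (the 0x6 mask below); the exact hardware rationale for
  // these bits is not documented here.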
  const auto *Offset = TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
  if (!Offset || (Offset->getImm() & 6) == 0)
    return 0;

  auto IsHazardFn = [TII](const MachineInstr &I) {
    if (!SIInstrInfo::isMIMG(I))
      return false;
    const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(I.getOpcode());
    return Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA &&
           TII->getInstSizeInBytes(I) >= 16;
  };

  return NSAtoVMEMWaitStates - getWaitStatesSince(IsHazardFn, 1);
}

int GCNHazardRecognizer::checkFPAtomicToDenormModeHazard(MachineInstr *MI) {
  int FPAtomicToDenormModeWaitStates = 3;

  if (!ST.hasFPAtomicToDenormModeHazard())
    return 0;
  assert(!ST.hasExtendedWaitCounts());

  if (MI->getOpcode() != AMDGPU::S_DENORM_MODE)
    return 0;

  auto IsHazardFn = [](const MachineInstr &I) {
    if (!SIInstrInfo::isVMEM(I) && !SIInstrInfo::isFLAT(I))
      return false;
    return SIInstrInfo::isFPAtomic(I);
  };

  auto IsExpiredFn = [](const MachineInstr &MI, int WaitStates) {
    if (WaitStates >= 3 || SIInstrInfo::isVALU(MI))
      return true;

    switch (MI.getOpcode()) {
    case AMDGPU::S_WAITCNT:
    case AMDGPU::S_WAITCNT_VSCNT:
    case AMDGPU::S_WAITCNT_VMCNT:
    case AMDGPU::S_WAITCNT_EXPCNT:
    case AMDGPU::S_WAITCNT_LGKMCNT:
    case AMDGPU::S_WAIT_IDLE:
      return true;
    default:
      break;
    }

    return false;
  };

  return FPAtomicToDenormModeWaitStates -
         ::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn);
}

int GCNHazardRecognizer::checkMAIHazards(MachineInstr *MI) {
  assert(SIInstrInfo::isMAI(*MI));

  return ST.hasGFX90AInsts() ? checkMAIHazards90A(MI) : checkMAIHazards908(MI);
}

int GCNHazardRecognizer::checkMFMAPadding(MachineInstr *MI) {
  // Early exit if no padding is requested.
  if (MFMAPaddingRatio == 0)
    return 0;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (!SIInstrInfo::isMFMA(*MI) || MFI->getOccupancy() < 2)
    return 0;

  int NeighborMFMALatency = 0;
  auto IsNeighboringMFMA = [&NeighborMFMALatency,
                            this](const MachineInstr &MI) {
    if (!SIInstrInfo::isMFMA(MI))
      return false;

    NeighborMFMALatency = this->getMFMAPipelineWaitStates(MI);
    return true;
  };

  const int MaxMFMAPipelineWaitStates = 16;
  int WaitStatesSinceNeighborMFMA =
      getWaitStatesSince(IsNeighboringMFMA, MaxMFMAPipelineWaitStates);

  int NeighborMFMAPaddingNeeded =
      (NeighborMFMALatency * MFMAPaddingRatio / 100) -
      WaitStatesSinceNeighborMFMA;
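  // Worked example: with a 16-cycle neighboring MFMA, a padding ratio of 50
  // and 3 wait states already elapsed, this requests 16 * 50 / 100 - 3 = 5
  // s_nops of padding.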

  return std::max(0, NeighborMFMAPaddingNeeded);
}

int GCNHazardRecognizer::checkMAIHazards908(MachineInstr *MI) {
  int WaitStatesNeeded = 0;
  unsigned Opc = MI->getOpcode();

  auto IsVALUFn = [](const MachineInstr &MI) {
    return SIInstrInfo::isVALU(MI) || MI.isInlineAsm();
  };
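  // Inline asm is conservatively assumed to behave like a VALU instruction.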

  if (Opc != AMDGPU::V_ACCVGPR_READ_B32_e64) { // MFMA or v_accvgpr_write
    const int LegacyVALUWritesVGPRWaitStates = 2;
    const int VALUWritesExecWaitStates = 4;
    const int MaxWaitStates = 4;

    int WaitStatesNeededForUse = VALUWritesExecWaitStates -
      getWaitStatesSinceDef(AMDGPU::EXEC, IsVALUFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded < MaxWaitStates) {
      for (const MachineOperand &Use : MI->explicit_uses()) {
        const int MaxWaitStates = 2;

        if (!Use.isReg() || !TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
          continue;

        int WaitStatesNeededForUse = LegacyVALUWritesVGPRWaitStates -
          getWaitStatesSinceDef(Use.getReg(), IsVALUFn, MaxWaitStates);
        WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

        if (WaitStatesNeeded == MaxWaitStates)
          break;
      }
    }
  }

  for (const MachineOperand &Op : MI->explicit_operands()) {
    if (!Op.isReg() || !TRI.isAGPR(MF.getRegInfo(), Op.getReg()))
      continue;

    if (Op.isDef() && Opc != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
      continue;

    const int MFMAWritesAGPROverlappedSrcABWaitStates = 4;
    const int MFMAWritesAGPROverlappedSrcCWaitStates = 2;
    const int MFMA4x4WritesAGPRAccVgprReadWaitStates = 4;
    const int MFMA16x16WritesAGPRAccVgprReadWaitStates = 10;
    const int MFMA32x32WritesAGPRAccVgprReadWaitStates = 18;
    const int MFMA4x4WritesAGPRAccVgprWriteWaitStates = 1;
    const int MFMA16x16WritesAGPRAccVgprWriteWaitStates = 7;
    const int MFMA32x32WritesAGPRAccVgprWriteWaitStates = 15;
    const int MaxWaitStates = 18;
    Register Reg = Op.getReg();
    unsigned HazardDefLatency = 0;
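    // The scheduling model's MFMA latency selects the wait-state constant:
    // a 2-cycle pipeline corresponds to the 4x4 cases, 8 cycles to 16x16, and
    // 16 cycles to 32x32 in the switches below.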

    auto IsOverlappedMFMAFn = [Reg, &HazardDefLatency,
                               this](const MachineInstr &MI) {
      if (!SIInstrInfo::isMFMA(MI))
        return false;
      Register DstReg = MI.getOperand(0).getReg();
      if (DstReg == Reg)
        return false;
      HazardDefLatency =
          std::max(HazardDefLatency, TSchedModel.computeInstrLatency(&MI));
      return TRI.regsOverlap(DstReg, Reg);
    };

    int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsOverlappedMFMAFn,
                                                   MaxWaitStates);
    int NeedWaitStates = MFMAWritesAGPROverlappedSrcABWaitStates;
    int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    int OpNo = Op.getOperandNo();
    if (OpNo == SrcCIdx) {
      NeedWaitStates = MFMAWritesAGPROverlappedSrcCWaitStates;
    } else if (Opc == AMDGPU::V_ACCVGPR_READ_B32_e64) {
      switch (HazardDefLatency) {
      case 2:  NeedWaitStates = MFMA4x4WritesAGPRAccVgprReadWaitStates;
        break;
      case 8:  NeedWaitStates = MFMA16x16WritesAGPRAccVgprReadWaitStates;
        break;
      case 16: [[fallthrough]];
      default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprReadWaitStates;
        break;
      }
    } else if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64) {
      switch (HazardDefLatency) {
      case 2:  NeedWaitStates = MFMA4x4WritesAGPRAccVgprWriteWaitStates;
        break;
      case 8:  NeedWaitStates = MFMA16x16WritesAGPRAccVgprWriteWaitStates;
        break;
      case 16: [[fallthrough]];
      default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprWriteWaitStates;
        break;
      }
    }

    int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      return WaitStatesNeeded; // Early exit.

    auto IsAccVgprWriteFn = [Reg, this](const MachineInstr &MI) {
      if (MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
        return false;
      Register DstReg = MI.getOperand(0).getReg();
      return TRI.regsOverlap(Reg, DstReg);
    };

    const int AccVGPRWriteMFMAReadSrcCWaitStates = 1;
    const int AccVGPRWriteMFMAReadSrcABWaitStates = 3;
    const int AccVGPRWriteAccVgprReadWaitStates = 3;
    NeedWaitStates = AccVGPRWriteMFMAReadSrcABWaitStates;
    if (OpNo == SrcCIdx)
      NeedWaitStates = AccVGPRWriteMFMAReadSrcCWaitStates;
    else if (Opc == AMDGPU::V_ACCVGPR_READ_B32_e64)
      NeedWaitStates = AccVGPRWriteAccVgprReadWaitStates;

    WaitStatesNeededForUse = NeedWaitStates -
      getWaitStatesSinceDef(Reg, IsAccVgprWriteFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      return WaitStatesNeeded; // Early exit.
  }

  if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64) {
    const int MFMA4x4ReadSrcCAccVgprWriteWaitStates = 0;
    const int MFMA16x16ReadSrcCAccVgprWriteWaitStates = 5;
    const int MFMA32x32ReadSrcCAccVgprWriteWaitStates = 13;
    const int MaxWaitStates = 13;
    Register DstReg = MI->getOperand(0).getReg();
    unsigned HazardDefLatency = 0;

    auto IsSrcCMFMAFn = [DstReg, &HazardDefLatency,
                         this](const MachineInstr &MI) {
      if (!SIInstrInfo::isMFMA(MI))
        return false;
      Register Reg = TII.getNamedOperand(MI, AMDGPU::OpName::src2)->getReg();
      HazardDefLatency =
          std::max(HazardDefLatency, TSchedModel.computeInstrLatency(&MI));
      return TRI.regsOverlap(Reg, DstReg);
    };

    int WaitStatesSince = getWaitStatesSince(IsSrcCMFMAFn, MaxWaitStates);
    int NeedWaitStates;
    switch (HazardDefLatency) {
    case 2:  NeedWaitStates = MFMA4x4ReadSrcCAccVgprWriteWaitStates;
      break;
    case 8:  NeedWaitStates = MFMA16x16ReadSrcCAccVgprWriteWaitStates;
      break;
    case 16: [[fallthrough]];
    default: NeedWaitStates = MFMA32x32ReadSrcCAccVgprWriteWaitStates;
      break;
    }

    int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSince;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  // Pad neighboring MFMA with noops for better inter-wave performance.
  WaitStatesNeeded = std::max(WaitStatesNeeded, checkMFMAPadding(MI));

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
  int WaitStatesNeeded = 0;
  unsigned Opc = MI->getOpcode();

  auto IsLegacyVALUFn = [](const MachineInstr &MI) {
    return SIInstrInfo::isVALU(MI) && !SIInstrInfo::isMFMA(MI);
  };

  auto IsLegacyVALUNotDotFn = [](const MachineInstr &MI) {
    return SIInstrInfo::isVALU(MI) && !SIInstrInfo::isMFMA(MI) &&
           !SIInstrInfo::isDOT(MI);
  };

  if (!SIInstrInfo::isMFMA(*MI))
    return WaitStatesNeeded;

  const int VALUWritesExecWaitStates = 4;
  int WaitStatesNeededForUse = VALUWritesExecWaitStates -
    getWaitStatesSinceDef(AMDGPU::EXEC, IsLegacyVALUFn,
                          VALUWritesExecWaitStates);
  WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

  int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);

  // Loop over the uses, handling both DGEMM and S/HGEMM as the 2nd instruction
  // of the hazard pair.
  for (const MachineOperand &Use : MI->explicit_uses()) {
    const int LegacyVALUNotDotWritesVGPRWaitStates = 2;
    const int SMFMA4x4WritesVGPROverlappedSMFMASrcCWaitStates = 2;
    const int GFX940_XDL2PassWritesVGPROverlappedSMFMASrcCWaitStates = 3;
    const int GFX940_XDL4PassWritesVGPROverlappedSMFMASrcCWaitStates = 5;
    const int GFX940_SMFMA4PassWritesVGPROverlappedSMFMASrcCWaitStates = 4;
    const int GFX940_XDL8PassWritesVGPROverlappedSMFMASrcCWaitStates = 9;
    const int GFX940_SMFMA8PassWritesVGPROverlappedSMFMASrcCWaitStates = 8;
    const int GFX940_XDL16PassWritesVGPROverlappedSMFMASrcCWaitStates = 17;
    const int GFX940_SMFMA16PassWritesVGPROverlappedSMFMASrcCWaitStates = 16;
    const int SMFMA16x16WritesVGPROverlappedSMFMASrcCWaitStates = 8;
    const int SMFMA32x32WritesVGPROverlappedSMFMASrcCWaitStates = 16;
    const int SMFMA4x4WritesVGPROverlappedDMFMASrcCWaitStates = 3;
    const int SMFMA16x16WritesVGPROverlappedDMFMASrcCWaitStates = 9;
    const int SMFMA32x32WritesVGPROverlappedDMFMASrcCWaitStates = 17;
    const int DMFMA16x16WritesVGPROverlappedSrcCWaitStates = 9;
    const int DMFMA4x4WritesVGPROverlappedSrcCWaitStates = 4;
    const int SMFMA4x4WritesVGPROverlappedSrcABWaitStates = 5;
    const int SMFMA16x16WritesVGPROverlappedSrcABWaitStates = 11;
    const int SMFMA32x32WritesVGPROverlappedSrcABWaitStates = 19;
    const int GFX940_SMFMA2PassWritesVGPROverlappedSrcABWaitStates = 4;
    const int GFX940_SMFMA4PassWritesVGPROverlappedSrcABWaitStates = 6;
    const int GFX940_SMFMA8PassWritesVGPROverlappedSrcABWaitStates = 10;
    const int GFX940_SMFMA16PassWritesVGPROverlappedSrcABWaitStates = 18;
    const int GFX940_XDL2PassWritesVGPROverlappedSrcABWaitStates = 5;
    const int GFX940_XDL4PassWritesVGPROverlappedSrcABWaitStates = 7;
    const int GFX940_XDL8PassWritesVGPROverlappedSrcABWaitStates = 11;
    const int GFX940_XDL16PassWritesVGPROverlappedSrcABWaitStates = 19;
    const int DMFMA4x4WritesVGPROverlappedMFMASrcABWaitStates = 6;
    const int DMFMA16x16WritesVGPROverlappedMFMASrcABWaitStates = 11;
    const int DMFMA4x4WritesVGPRFullSrcCWaitStates = 4;
    const int GFX940_SMFMA4x4WritesVGPRFullSrcCWaitStates = 2;
    const int MaxWaitStates = 19;

    if (!Use.isReg())
      continue;
    Register Reg = Use.getReg();
    bool FullReg;
    const MachineInstr *MI1;

    auto IsOverlappedMFMAFn = [Reg, &FullReg, &MI1,
                               this](const MachineInstr &MI) {
      if (!SIInstrInfo::isMFMA(MI))
        return false;
      Register DstReg = MI.getOperand(0).getReg();
      FullReg = (DstReg == Reg);
      MI1 = &MI;
      return TRI.regsOverlap(DstReg, Reg);
    };

    WaitStatesNeededForUse = LegacyVALUNotDotWritesVGPRWaitStates -
      getWaitStatesSinceDef(Reg, IsLegacyVALUNotDotFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    int NumWaitStates =
        getWaitStatesSinceDef(Reg, IsOverlappedMFMAFn, MaxWaitStates);
    if (NumWaitStates == std::numeric_limits<int>::max())
      continue;

    int OpNo = Use.getOperandNo();
    unsigned Opc1 = MI1->getOpcode();
    int NeedWaitStates = 0;
    if (OpNo == SrcCIdx) {
      if (!isDGEMM(Opc) && (!ST.hasGFX940Insts() && isDGEMM(Opc1))) {
        NeedWaitStates = 0;
      } else if (FullReg) {
        if ((Opc == AMDGPU::V_MFMA_F64_4X4X4F64_e64 ||
             Opc == AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64) &&
            (Opc1 == AMDGPU::V_MFMA_F64_4X4X4F64_e64 ||
             Opc1 == AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64))
          NeedWaitStates = DMFMA4x4WritesVGPRFullSrcCWaitStates;
        else if (ST.hasGFX940Insts() &&
                 TSchedModel.computeInstrLatency(MI1) == 2)
          NeedWaitStates = GFX940_SMFMA4x4WritesVGPRFullSrcCWaitStates;
      } else {
        switch (Opc1) {
        case AMDGPU::V_MFMA_F64_16X16X4F64_e64:
        case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64:
        case AMDGPU::V_MFMA_F64_16X16X4F64_mac_e64:
        case AMDGPU::V_MFMA_F64_16X16X4F64_mac_vgprcd_e64:
          if (!isXDL(ST, *MI))
            NeedWaitStates = DMFMA16x16WritesVGPROverlappedSrcCWaitStates;
          break;
        case AMDGPU::V_MFMA_F64_4X4X4F64_e64:
        case AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64:
          if (!isXDL(ST, *MI))
            NeedWaitStates = DMFMA4x4WritesVGPROverlappedSrcCWaitStates;
          break;
        default:
          if (ST.hasGFX940Insts() && isXDL(ST, *MI) && !isXDL(ST, *MI1))
            break;
          switch (TSchedModel.computeInstrLatency(MI1)) {
          case 2:
            NeedWaitStates = ST.hasGFX940Insts()
              ? isXDL(ST, *MI1)
                ? GFX940_XDL2PassWritesVGPROverlappedSMFMASrcCWaitStates
                : SMFMA4x4WritesVGPROverlappedSMFMASrcCWaitStates
              : isDGEMM(Opc)
                ? SMFMA4x4WritesVGPROverlappedDMFMASrcCWaitStates
                : SMFMA4x4WritesVGPROverlappedSMFMASrcCWaitStates;
            break;
          case 4:
            assert(ST.hasGFX940Insts());
            NeedWaitStates = isXDL(ST, *MI1)
              ? GFX940_XDL4PassWritesVGPROverlappedSMFMASrcCWaitStates
              : GFX940_SMFMA4PassWritesVGPROverlappedSMFMASrcCWaitStates;
            break;
          case 8:
            NeedWaitStates = ST.hasGFX940Insts()
              ? isXDL(ST, *MI1)
                ? GFX940_XDL8PassWritesVGPROverlappedSMFMASrcCWaitStates
                : GFX940_SMFMA8PassWritesVGPROverlappedSMFMASrcCWaitStates
              : isDGEMM(Opc)
                ? SMFMA16x16WritesVGPROverlappedDMFMASrcCWaitStates
                : SMFMA16x16WritesVGPROverlappedSMFMASrcCWaitStates;
            break;
          case 16: [[fallthrough]];
          default:
            NeedWaitStates = ST.hasGFX940Insts()
              ? isXDL(ST, *MI1)
                ? GFX940_XDL16PassWritesVGPROverlappedSMFMASrcCWaitStates
                : GFX940_SMFMA16PassWritesVGPROverlappedSMFMASrcCWaitStates
              : isDGEMM(Opc)
                ? SMFMA32x32WritesVGPROverlappedDMFMASrcCWaitStates
                : SMFMA32x32WritesVGPROverlappedSMFMASrcCWaitStates;
          }
        }
      }
    } else {
      switch (Opc1) {
      case AMDGPU::V_MFMA_F64_16X16X4F64_e64:
      case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64:
      case AMDGPU::V_MFMA_F64_16X16X4F64_mac_e64:
      case AMDGPU::V_MFMA_F64_16X16X4F64_mac_vgprcd_e64:
        NeedWaitStates = DMFMA16x16WritesVGPROverlappedMFMASrcABWaitStates;
        break;
      case AMDGPU::V_MFMA_F64_4X4X4F64_e64:
      case AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64:
        NeedWaitStates = DMFMA4x4WritesVGPROverlappedMFMASrcABWaitStates;
        break;
      default:
        switch (TSchedModel.computeInstrLatency(MI1)) {
        case 2:
          NeedWaitStates = ST.hasGFX940Insts()
            ? isXDL(ST, *MI1)
              ? GFX940_XDL2PassWritesVGPROverlappedSrcABWaitStates
              : GFX940_SMFMA2PassWritesVGPROverlappedSrcABWaitStates
            : SMFMA4x4WritesVGPROverlappedSrcABWaitStates;
          break;
        case 4:
          assert(ST.hasGFX940Insts());
          NeedWaitStates = isXDL(ST, *MI1)
            ? GFX940_XDL4PassWritesVGPROverlappedSrcABWaitStates
            : GFX940_SMFMA4PassWritesVGPROverlappedSrcABWaitStates;
          break;
        case 8:
          NeedWaitStates = ST.hasGFX940Insts()
            ? isXDL(ST, *MI1)
              ? GFX940_XDL8PassWritesVGPROverlappedSrcABWaitStates
              : GFX940_SMFMA8PassWritesVGPROverlappedSrcABWaitStates
            : SMFMA16x16WritesVGPROverlappedSrcABWaitStates;
          break;
        case 16: [[fallthrough]];
        default:
          NeedWaitStates = ST.hasGFX940Insts()
            ? isXDL(ST, *MI1)
              ? GFX940_XDL16PassWritesVGPROverlappedSrcABWaitStates
              : GFX940_SMFMA16PassWritesVGPROverlappedSrcABWaitStates
            : SMFMA32x32WritesVGPROverlappedSrcABWaitStates;
        }
      }
    }
    if (WaitStatesNeeded >= NeedWaitStates)
      continue;

    WaitStatesNeededForUse = NeedWaitStates - NumWaitStates;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      break;
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkMAILdStHazards(MachineInstr *MI) {
  // On gfx90a+ relevant hazards are checked in checkMAIVALUHazards()
  if (!ST.hasMAIInsts() || ST.hasGFX90AInsts())
    return 0;

  int WaitStatesNeeded = 0;

  auto IsAccVgprReadFn = [](const MachineInstr &MI) {
    return MI.getOpcode() == AMDGPU::V_ACCVGPR_READ_B32_e64;
  };

  for (const MachineOperand &Op : MI->explicit_uses()) {
    if (!Op.isReg() || !TRI.isVGPR(MF.getRegInfo(), Op.getReg()))
      continue;

    Register Reg = Op.getReg();

    const int AccVgprReadLdStWaitStates = 2;
    const int VALUWriteAccVgprRdWrLdStDepVALUWaitStates = 1;
    const int MaxWaitStates = 2;

    int WaitStatesNeededForUse = AccVgprReadLdStWaitStates -
      getWaitStatesSinceDef(Reg, IsAccVgprReadFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      return WaitStatesNeeded; // Early exit.

    auto IsVALUAccVgprRdWrCheckFn = [Reg, this](const MachineInstr &MI) {
      if (MI.getOpcode() != AMDGPU::V_ACCVGPR_READ_B32_e64 &&
          MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
        return false;
      auto IsVALUFn = [](const MachineInstr &MI) {
        return SIInstrInfo::isVALU(MI) && !SIInstrInfo::isMAI(MI);
      };
      return getWaitStatesSinceDef(Reg, IsVALUFn, 2 /*MaxWaitStates*/) <
             std::numeric_limits<int>::max();
    };

    WaitStatesNeededForUse = VALUWriteAccVgprRdWrLdStDepVALUWaitStates -
      getWaitStatesSince(IsVALUAccVgprRdWrCheckFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkMAIVALUHazards(MachineInstr *MI) {
  if (!ST.hasGFX90AInsts())
    return 0;

  auto IsDGEMMFn = [](const MachineInstr &MI) -> bool {
    return isDGEMM(MI.getOpcode());
  };

  // This is checked in checkMAIHazards90A()
  if (SIInstrInfo::isMFMA(*MI))
    return 0;

  const MachineRegisterInfo &MRI = MF.getRegInfo();

  int WaitStatesNeeded = 0;

  bool IsMem = SIInstrInfo::isVMEM(*MI) ||
               SIInstrInfo::isFLAT(*MI) ||
               SIInstrInfo::isDS(*MI);
  bool IsMemOrExport = IsMem || SIInstrInfo::isEXP(*MI);
  bool IsVALU = SIInstrInfo::isVALU(*MI);

  const MachineInstr *MFMA = nullptr;
  unsigned Reg;
  auto IsMFMAWriteFn = [&Reg, &MFMA, this](const MachineInstr &MI) {
    if (!SIInstrInfo::isMFMA(MI) ||
        !TRI.regsOverlap(MI.getOperand(0).getReg(), Reg))
      return false;
    MFMA = &MI;
    return true;
  };

  const MachineInstr *DOT = nullptr;
  auto IsDotWriteFn = [&Reg, &DOT, this](const MachineInstr &MI) {
    if (!SIInstrInfo::isDOT(MI) ||
        !TRI.regsOverlap(MI.getOperand(0).getReg(), Reg))
      return false;
    DOT = &MI;
    return true;
  };

  bool DGEMMAfterVALUWrite = false;
  auto IsDGEMMHazard = [&DGEMMAfterVALUWrite, this](const MachineInstr &MI) {
    // Found DGEMM on reverse traversal to def.
    if (isDGEMM(MI.getOpcode()))
      DGEMMAfterVALUWrite = true;

    // Only a hazard if the register is defined by a VALU and a DGEMM is found
    // after the def.
    if (!TII.isVALU(MI) || !DGEMMAfterVALUWrite)
      return false;

    return true;
  };

  int SrcCIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src2);

  if (IsMemOrExport || IsVALU) {
    const int SMFMA4x4WriteVgprVALUMemExpReadWaitStates = 5;
    const int SMFMA16x16WriteVgprVALUMemExpReadWaitStates = 11;
    const int SMFMA32x32WriteVgprVALUMemExpReadWaitStates = 19;
    const int GFX940_SMFMA2PassWriteVgprVALUMemExpReadWaitStates = 4;
    const int GFX940_SMFMA4PassWriteVgprVALUMemExpReadWaitStates = 6;
    const int GFX940_SMFMA8PassWriteVgprVALUMemExpReadWaitStates = 10;
    const int GFX940_SMFMA16PassWriteVgprVALUMemExpReadWaitStates = 18;
    const int GFX940_XDL2PassWriteVgprVALUMemExpReadWaitStates = 5;
    const int GFX940_XDL4PassWriteVgprVALUMemExpReadWaitStates = 7;
    const int GFX940_XDL8PassWriteVgprVALUMemExpReadWaitStates = 11;
    const int GFX940_XDL16PassWriteVgprVALUMemExpReadWaitStates = 19;
    const int DMFMA4x4WriteVgprMemExpReadWaitStates = 9;
    const int DMFMA16x16WriteVgprMemExpReadWaitStates = 18;
    const int DMFMA4x4WriteVgprVALUReadWaitStates = 6;
    const int DMFMA16x16WriteVgprVALUReadWaitStates = 11;
    const int DotWriteSameDotReadSrcAB = 3;
    const int DotWriteDifferentVALURead = 3;
    const int DMFMABetweenVALUWriteVMEMRead = 2;
    const int MaxWaitStates = 19;

    for (const MachineOperand &Use : MI->explicit_uses()) {
      if (!Use.isReg())
        continue;
      Reg = Use.getReg();

      DOT = nullptr;
      int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsDotWriteFn,
                                                     MaxWaitStates);
      if (DOT) {
        int NeedWaitStates = 0;
        if (DOT->getOpcode() == MI->getOpcode()) {
          if (&Use - &MI->getOperand(0) != SrcCIdx)
            NeedWaitStates = DotWriteSameDotReadSrcAB;
        } else {
          NeedWaitStates = DotWriteDifferentVALURead;
        }

        int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
        WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
      }

      // Workaround for a HW data hazard bug observed only on GFX90A. When
      // there is a DGEMM instruction in-between a VALU and a VMEM instruction,
      // the SQ incorrectly fails to insert the two wait states between the two
      // instructions that are needed to avoid the data hazard.
      if (IsMem && ST.hasGFX90AInsts() && !ST.hasGFX940Insts()) {
        DGEMMAfterVALUWrite = false;
        if (TRI.isVectorRegister(MRI, Reg)) {
          int WaitStatesNeededForUse =
              DMFMABetweenVALUWriteVMEMRead -
              getWaitStatesSinceDef(Reg, IsDGEMMHazard,
                                    DMFMABetweenVALUWriteVMEMRead);

          WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
        }
      }

      MFMA = nullptr;
      WaitStatesSinceDef =
          getWaitStatesSinceDef(Reg, IsMFMAWriteFn, MaxWaitStates);
      if (!MFMA)
        continue;

      unsigned HazardDefLatency = TSchedModel.computeInstrLatency(MFMA);
      int NeedWaitStates = MaxWaitStates;
      switch (HazardDefLatency) {
      case 2:
        NeedWaitStates =
            ST.hasGFX940Insts()
              ? isXDL(ST, *MFMA)
                ? GFX940_XDL2PassWriteVgprVALUMemExpReadWaitStates
                : GFX940_SMFMA2PassWriteVgprVALUMemExpReadWaitStates
              : SMFMA4x4WriteVgprVALUMemExpReadWaitStates;
        break;
      case 4:
        assert(isDGEMM(MFMA->getOpcode()) || ST.hasGFX940Insts());
        NeedWaitStates =
            isDGEMM(MFMA->getOpcode())
              ? IsMemOrExport ? DMFMA4x4WriteVgprMemExpReadWaitStates
                              : DMFMA4x4WriteVgprVALUReadWaitStates
              : isXDL(ST, *MFMA)
                ? GFX940_XDL4PassWriteVgprVALUMemExpReadWaitStates
                : GFX940_SMFMA4PassWriteVgprVALUMemExpReadWaitStates;
        break;
      case 8:
        NeedWaitStates =
            ST.hasGFX940Insts()
              ? isXDL(ST, *MFMA)
                ? GFX940_XDL8PassWriteVgprVALUMemExpReadWaitStates
                : GFX940_SMFMA8PassWriteVgprVALUMemExpReadWaitStates
              : SMFMA16x16WriteVgprVALUMemExpReadWaitStates;
        break;
      case 16: [[fallthrough]];
      default:
        NeedWaitStates =
            isDGEMM(MFMA->getOpcode())
              ? IsMemOrExport ? DMFMA16x16WriteVgprMemExpReadWaitStates
                              : DMFMA16x16WriteVgprVALUReadWaitStates
              : ST.hasGFX940Insts()
                ? isXDL(ST, *MFMA)
                  ? GFX940_XDL16PassWriteVgprVALUMemExpReadWaitStates
                  : GFX940_SMFMA16PassWriteVgprVALUMemExpReadWaitStates
                : SMFMA32x32WriteVgprVALUMemExpReadWaitStates;
        break;
      }

      int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

      if (WaitStatesNeeded == MaxWaitStates)
        break;
    }
  }

  unsigned Opc = MI->getOpcode();
  const int DMFMAToFMA64WaitStates = 2;
  if ((Opc == AMDGPU::V_FMA_F64_e64 ||
       Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64 ||
       Opc == AMDGPU::V_FMAC_F64_dpp) &&
      WaitStatesNeeded < DMFMAToFMA64WaitStates) {
    int WaitStatesNeededForUse = DMFMAToFMA64WaitStates -
      getWaitStatesSince(IsDGEMMFn, DMFMAToFMA64WaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  if (!IsVALU && !IsMemOrExport)
    return WaitStatesNeeded;

  for (const MachineOperand &Def : MI->defs()) {
    const int SMFMA4x4WriteVgprVALUWawWaitStates = 5;
    const int SMFMA16x16WriteVgprVALUWawWaitStates = 11;
    const int SMFMA32x32WriteVgprVALUWawWaitStates = 19;
    const int GFX940_SMFMA2PassWriteVgprVALUWawWaitStates = 4;
    const int GFX940_SMFMA4PassWriteVgprVALUWawWaitStates = 6;
    const int GFX940_SMFMA8PassWriteVgprVALUWawWaitStates = 10;
    const int GFX940_SMFMA16PassWriteVgprVALUWawWaitStates = 18;
    const int GFX940_XDL2PassWriteVgprVALUWawWaitStates = 5;
    const int GFX940_XDL4PassWriteVgprVALUWawWaitStates = 7;
    const int GFX940_XDL8PassWriteVgprVALUWawWaitStates = 11;
    const int GFX940_XDL16PassWriteVgprVALUWawWaitStates = 19;
    const int SMFMA4x4ReadVgprVALUWarWaitStates = 1;
    const int GFX940_XDL4PassReadVgprVALUWarWaitStates = 3;
    const int SMFMA16x16ReadVgprVALUWarWaitStates = 7;
    const int SMFMA32x32ReadVgprVALUWarWaitStates = 15;
    const int DMFMA4x4WriteVgprVALUWriteWaitStates = 6;
    const int DMFMA16x16WriteVgprVALUWriteWaitStates = 11;
    const int DotWriteDifferentVALUWrite = 3;
    const int MaxWaitStates = 19;
    const int MaxWarWaitStates = 15;

    Reg = Def.getReg();

    DOT = nullptr;
    int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsDotWriteFn,
                                                   MaxWaitStates);
    if (DOT && DOT->getOpcode() != MI->getOpcode())
      WaitStatesNeeded = std::max(WaitStatesNeeded, DotWriteDifferentVALUWrite -
                                                    WaitStatesSinceDef);

    MFMA = nullptr;
    WaitStatesSinceDef =
        getWaitStatesSinceDef(Reg, IsMFMAWriteFn, MaxWaitStates);
    if (MFMA) {
      int NeedWaitStates = MaxWaitStates;
      switch (TSchedModel.computeInstrLatency(MFMA)) {
      case 2:
        NeedWaitStates = ST.hasGFX940Insts()
          ? isXDL(ST, *MFMA)
            ? GFX940_XDL2PassWriteVgprVALUWawWaitStates
            : GFX940_SMFMA2PassWriteVgprVALUWawWaitStates
          : SMFMA4x4WriteVgprVALUWawWaitStates;
        break;
      case 4:
        assert(isDGEMM(MFMA->getOpcode()) || ST.hasGFX940Insts());
        NeedWaitStates = isDGEMM(MFMA->getOpcode())
            ? DMFMA4x4WriteVgprVALUWriteWaitStates
            : isXDL(ST, *MFMA)
              ? GFX940_XDL4PassWriteVgprVALUWawWaitStates
              : GFX940_SMFMA4PassWriteVgprVALUWawWaitStates;
        break;
      case 8:
        NeedWaitStates = ST.hasGFX940Insts()
          ? isXDL(ST, *MFMA)
            ? GFX940_XDL8PassWriteVgprVALUWawWaitStates
            : GFX940_SMFMA8PassWriteVgprVALUWawWaitStates
          : SMFMA16x16WriteVgprVALUWawWaitStates;
        break;
      case 16: [[fallthrough]];
      default:
        NeedWaitStates = isDGEMM(MFMA->getOpcode())
          ? DMFMA16x16WriteVgprVALUWriteWaitStates
          : ST.hasGFX940Insts()
            ? isXDL(ST, *MFMA)
              ? GFX940_XDL16PassWriteVgprVALUWawWaitStates
              : GFX940_SMFMA16PassWriteVgprVALUWawWaitStates
            : SMFMA32x32WriteVgprVALUWawWaitStates;
        break;
      }

      int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

      if (WaitStatesNeeded == MaxWaitStates)
        break;
    }

    auto IsSMFMAReadAsCFn = [&Reg, &MFMA, this](const MachineInstr &MI) {
      if (!SIInstrInfo::isMFMA(MI) || isDGEMM(MI.getOpcode()) ||
          !MI.readsRegister(Reg, &TRI))
        return false;

      if (ST.hasGFX940Insts() && !isXDL(ST, MI))
        return false;

      const MachineOperand *SrcC =
          TII.getNamedOperand(MI, AMDGPU::OpName::src2);
      assert(SrcC);
      if (!SrcC->isReg() || !TRI.regsOverlap(SrcC->getReg(), Reg))
        return false;

      MFMA = &MI;
      return true;
    };

    MFMA = nullptr;
    int WaitStatesSinceUse = getWaitStatesSince(IsSMFMAReadAsCFn,
                                                MaxWarWaitStates);
    if (!MFMA)
      continue;

    unsigned HazardDefLatency = TSchedModel.computeInstrLatency(MFMA);
    int NeedWaitStates = MaxWaitStates;
    switch (HazardDefLatency) {
    case 2:  NeedWaitStates = SMFMA4x4ReadVgprVALUWarWaitStates;
      break;
    case 4:  assert(ST.hasGFX940Insts());
      NeedWaitStates = GFX940_XDL4PassReadVgprVALUWarWaitStates;
      break;
    case 8:  NeedWaitStates = SMFMA16x16ReadVgprVALUWarWaitStates;
      break;
    case 16: [[fallthrough]];
    default: NeedWaitStates = SMFMA32x32ReadVgprVALUWarWaitStates;
      break;
    }

    int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceUse;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  return WaitStatesNeeded;
}

bool GCNHazardRecognizer::ShouldPreferAnother(SUnit *SU) {
  if (!SU->isInstr())
    return false;

  const MachineInstr *MAI = nullptr;

  auto IsMFMAFn = [&MAI](const MachineInstr &MI) {
    MAI = nullptr;
    if (SIInstrInfo::isMFMA(MI))
      MAI = &MI;
    return MAI != nullptr;
  };

  MachineInstr *MI = SU->getInstr();
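  // Prefer another candidate if a previously issued MFMA may still be in the
  // pipeline, i.e. fewer wait states have elapsed than its latency.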
  if (IsMFMAFn(*MI)) {
    int W = getWaitStatesSince(IsMFMAFn, 16);
    if (MAI)
      return W < (int)TSchedModel.computeInstrLatency(MAI);
  }

  return false;
}

bool GCNHazardRecognizer::fixVALUMaskWriteHazard(MachineInstr *MI) {
  if (!ST.hasVALUMaskWriteHazard())
    return false;
  assert(!ST.hasExtendedWaitCounts());

  if (!ST.isWave64() || !SIInstrInfo::isSALU(*MI))
    return false;

  // The hazard sequence is three instructions:
  //   1. VALU reads SGPR as mask
  //   2. SALU writes SGPR
  //   3. SALU reads SGPR
  // The hazard can expire if the distance between 2 and 3 is sufficient, but
  // in practice that happens less than 10% of the time; hence, to avoid
  // searching, this code always assumes the hazard exists once 1 and 2 are
  // present.
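  //
  // As an illustrative (hypothetical) wave64 sequence, for exposition only:
  //   v_cndmask_b32 v0, v1, v2, s[0:1]  ; 1. VALU reads SGPR pair as mask
  //   s_mov_b64 s[0:1], exec            ; 2. SALU writes SGPR (this MI)
  //   s_cmp_lg_u64 s[0:1], 0            ; 3. SALU reads SGPR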

  const MachineOperand *SDSTOp = TII.getNamedOperand(*MI, AMDGPU::OpName::sdst);
  if (!SDSTOp || !SDSTOp->isReg())
    return false;

  const Register HazardReg = SDSTOp->getReg();
  if (HazardReg == AMDGPU::EXEC ||
      HazardReg == AMDGPU::EXEC_LO ||
      HazardReg == AMDGPU::EXEC_HI ||
      HazardReg == AMDGPU::M0)
    return false;

  auto IsHazardFn = [HazardReg, this](const MachineInstr &I) {
    switch (I.getOpcode()) {
    case AMDGPU::V_ADDC_U32_e32:
    case AMDGPU::V_ADDC_U32_dpp:
    case AMDGPU::V_CNDMASK_B16_e32:
    case AMDGPU::V_CNDMASK_B16_dpp:
    case AMDGPU::V_CNDMASK_B32_e32:
    case AMDGPU::V_CNDMASK_B32_dpp:
    case AMDGPU::V_DIV_FMAS_F32_e64:
    case AMDGPU::V_DIV_FMAS_F64_e64:
    case AMDGPU::V_SUBB_U32_e32:
    case AMDGPU::V_SUBB_U32_dpp:
    case AMDGPU::V_SUBBREV_U32_e32:
    case AMDGPU::V_SUBBREV_U32_dpp:
      // These implicitly read VCC as mask source.
      return HazardReg == AMDGPU::VCC ||
             HazardReg == AMDGPU::VCC_LO ||
             HazardReg == AMDGPU::VCC_HI;
    case AMDGPU::V_ADDC_U32_e64:
    case AMDGPU::V_ADDC_U32_e64_dpp:
    case AMDGPU::V_CNDMASK_B16_e64:
    case AMDGPU::V_CNDMASK_B16_e64_dpp:
    case AMDGPU::V_CNDMASK_B32_e64:
    case AMDGPU::V_CNDMASK_B32_e64_dpp:
    case AMDGPU::V_SUBB_U32_e64:
    case AMDGPU::V_SUBB_U32_e64_dpp:
    case AMDGPU::V_SUBBREV_U32_e64:
    case AMDGPU::V_SUBBREV_U32_e64_dpp: {
      // Only check mask register overlaps.
      const MachineOperand *SSRCOp = TII.getNamedOperand(I, AMDGPU::OpName::src2);
      assert(SSRCOp);
      return TRI.regsOverlap(SSRCOp->getReg(), HazardReg);
    }
    default:
      return false;
    }
  };

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  auto IsExpiredFn = [&MRI, this](const MachineInstr &I, int) {
    // s_waitcnt_depctr sa_sdst(0) mitigates the hazard.
    if (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
        AMDGPU::DepCtr::decodeFieldSaSdst(I.getOperand(0).getImm()) == 0)
      return true;

    // A VALU access to any SGPR or literal constant other than HazardReg
    // mitigates the hazard. No need to check HazardReg here as this will
    // only be called when !IsHazardFn.
    if (!SIInstrInfo::isVALU(I))
      return false;
    for (int OpNo = 0, End = I.getNumOperands(); OpNo < End; ++OpNo) {
      const MachineOperand &Op = I.getOperand(OpNo);
      if (Op.isReg()) {
        Register OpReg = Op.getReg();
        // Only consider uses
        if (!Op.isUse())
          continue;
        // Ignore EXEC
        if (OpReg == AMDGPU::EXEC ||
            OpReg == AMDGPU::EXEC_LO ||
            OpReg == AMDGPU::EXEC_HI)
          continue;
        // Ignore all implicit uses except VCC
        if (Op.isImplicit()) {
          if (OpReg == AMDGPU::VCC ||
              OpReg == AMDGPU::VCC_LO ||
              OpReg == AMDGPU::VCC_HI)
            return true;
          continue;
        }
        if (TRI.isSGPRReg(MRI, OpReg))
          return true;
      } else {
        const MCInstrDesc &InstDesc = I.getDesc();
        const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
        if (!TII.isInlineConstant(Op, OpInfo))
          return true;
      }
    }
    return false;
  };

  // Check for hazard
  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  auto NextMI = std::next(MI->getIterator());

  // Add s_waitcnt_depctr sa_sdst(0) after SALU write.
  BuildMI(*MI->getParent(), NextMI, MI->getDebugLoc(),
          TII.get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(AMDGPU::DepCtr::encodeFieldSaSdst(0));

  // SALU write may be s_getpc in a bundle.
  if (MI->getOpcode() == AMDGPU::S_GETPC_B64) {
    // Update offsets of any references in the bundle.
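    // The s_waitcnt_depctr inserted above is 4 bytes and sits between the
    // s_getpc and these references, so each PC-relative offset grows by 4.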
    while (NextMI != MI->getParent()->end() &&
           NextMI->isBundledWithPred()) {
      for (auto &Operand : NextMI->operands()) {
        if (Operand.isGlobal())
          Operand.setOffset(Operand.getOffset() + 4);
      }
      NextMI++;
    }
  }

  return true;
}
