//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC to update the predicates.
///
/// For example:
/// %vcc = V_CMP_GT_F32 %vgpr1, %vgpr2
/// %sgpr0 = SI_IF %vcc
/// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0
/// %sgpr0 = SI_ELSE %sgpr0
/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0
/// SI_END_CF %sgpr0
///
/// becomes:
///
/// %sgpr0 = S_AND_SAVEEXEC_B64 %vcc  // Save and update the exec mask
/// %sgpr0 = S_XOR_B64 %sgpr0, %exec  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0 // Do the IF block of the branch
///
/// label0:
/// %sgpr0 = S_OR_SAVEEXEC_B64 %sgpr0 // Save the THEN block's exec mask and
///                                   // re-enable the lanes saved for ELSE
/// %exec = S_XOR_B64 %sgpr0, %exec   // Update the exec mask for the ELSE block
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0 // Do the ELSE block
/// label1:
/// %exec = S_OR_B64 %exec, %sgpr0    // Re-enable saved exec mask bits
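///
/// Loop control flow is lowered similarly; a sketch of the lowering, using
/// the same notation as the example above:
///
/// %sgpr0 = SI_IF_BREAK %vcc, %sgpr0 // becomes: AND %vcc with EXEC, then
///                                   // OR the result into the accumulated
///                                   // loop-exit mask in %sgpr0
/// SI_LOOP %sgpr0, label_loop        // becomes:
///                                   // %exec = S_ANDN2_B64 %exec, %sgpr0
///                                   // S_CBRANCH_EXECNZ label_loop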
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

static cl::opt<bool>
RemoveRedundantEndcf("amdgpu-remove-redundant-endcf",
    cl::init(true), cl::ReallyHidden);

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  LiveIntervals *LIS = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  SetVector<MachineInstr*> LoweredEndCf;
  DenseSet<Register> LoweredIf;
  SmallSet<MachineInstr *, 16> NeedsKillCleanup;

  const TargetRegisterClass *BoolRC = nullptr;
  bool InsertKillCleanups;
  unsigned AndOpc;
  unsigned OrOpc;
  unsigned XorOpc;
  unsigned MovTermOpc;
  unsigned Andn2TermOpc;
  unsigned XorTermrOpc;
  unsigned OrTermrOpc;
  unsigned OrSaveExecOpc;
  unsigned Exec;

  void emitIf(MachineInstr &MI);
  void emitElse(MachineInstr &MI);
  void emitIfBreak(MachineInstr &MI);
  void emitLoop(MachineInstr &MI);

  MachineBasicBlock *emitEndCf(MachineInstr &MI);

  void lowerInitExec(MachineBasicBlock *MBB, MachineInstr &MI);

  void findMaskOperands(MachineInstr &MI, unsigned OpNo,
                        SmallVectorImpl<MachineOperand> &Src) const;

  void combineMasks(MachineInstr &MI);

  bool removeMBBifRedundant(MachineBasicBlock &MBB);

  MachineBasicBlock *process(MachineInstr &MI);

  // Skip to the next instruction, ignoring debug instructions, and trivial
  // block boundaries (blocks that have one (typically fallthrough) successor,
  // and the successor has one predecessor).
  MachineBasicBlock::iterator
  skipIgnoreExecInstsTrivialSucc(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator It) const;

  /// Find the insertion point for a new conditional branch.
  MachineBasicBlock::iterator
  skipToUncondBrOrEnd(MachineBasicBlock &MBB,
                      MachineBasicBlock::iterator I) const {
    assert(I->isTerminator());

    // FIXME: What if we had multiple pre-existing conditional branches?
    MachineBasicBlock::iterator End = MBB.end();
    while (I != End && !I->isUnconditionalBranch())
      ++I;
    return I;
  }

  // Remove redundant SI_END_CF instructions.
  void optimizeEndCf();

public:
  static char ID;

  SILowerControlFlow() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Should preserve the same set that TwoAddressInstructions does.
    AU.addPreserved<SlotIndexes>();
    AU.addPreserved<LiveIntervals>();
    AU.addPreservedID(LiveVariablesID);
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

static void setImpSCCDefDead(MachineInstr &MI, bool IsDead) {
  MachineOperand &ImpDefSCC = MI.getOperand(3);
  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());

  ImpDefSCC.setIsDead(IsDead);
}

char &llvm::SILowerControlFlowID = SILowerControlFlow::ID;

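// Returns true if a SI_KILL_*_TERMINATOR is reachable on any path from the
// successors of \p Begin, stopping the walk at \p End.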
static bool hasKill(const MachineBasicBlock *Begin,
                    const MachineBasicBlock *End, const SIInstrInfo *TII) {
  DenseSet<const MachineBasicBlock*> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(Begin->successors());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (MBB == End || !Visited.insert(MBB).second)
      continue;
    for (auto &Term : MBB->terminators())
      if (TII->isKillTerminator(Term.getOpcode()))
        return true;

    Worklist.append(MBB->succ_begin(), MBB->succ_end());
  }

  return false;
}

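// A "simple" SI_IF is one whose saved-exec result is used only by the
// matching SI_END_CF, so the IF can return the full saved exec mask instead
// of just the cleared bits.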
static bool isSimpleIf(const MachineInstr &MI, const MachineRegisterInfo *MRI) {
  Register SaveExecReg = MI.getOperand(0).getReg();
  auto U = MRI->use_instr_nodbg_begin(SaveExecReg);

  if (U == MRI->use_instr_nodbg_end() ||
      std::next(U) != MRI->use_instr_nodbg_end() ||
      U->getOpcode() != AMDGPU::SI_END_CF)
    return false;

  return true;
}

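// Lower SI_IF. A sketch of the emitted wave64 sequence (wave32 uses the
// *_B32 opcodes and EXEC_LO; register names here are illustrative):
//
//   %copy = COPY $exec, implicit-def $exec
//   %tmp  = S_AND_B64 %copy, %cond
//   %dst  = S_XOR_B64 %tmp, %copy    ; omitted for a "simple" if
//   $exec = S_MOV_B64_term %tmp
//   S_CBRANCH_EXECZ %bb.end          ; branch target from the SI_IF operand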
void SILowerControlFlow::emitIf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);
  Register SaveExecReg = MI.getOperand(0).getReg();
  MachineOperand &Cond = MI.getOperand(1);
  assert(Cond.getSubReg() == AMDGPU::NoSubRegister);

  MachineOperand &ImpDefSCC = MI.getOperand(4);
  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());

  // If there is only one use of the save exec register and that use is
  // SI_END_CF, we can optimize SI_IF by returning the full saved exec mask
  // instead of just the cleared bits.
  bool SimpleIf = isSimpleIf(MI, MRI);

  if (InsertKillCleanups) {
    // Check for SI_KILL_*_TERMINATOR on the full path of control flow and
    // flag the associated SI_END_CF for insertion of a kill cleanup.
    auto UseMI = MRI->use_instr_nodbg_begin(SaveExecReg);
    while (UseMI->getOpcode() != AMDGPU::SI_END_CF) {
      assert(std::next(UseMI) == MRI->use_instr_nodbg_end());
      assert(UseMI->getOpcode() == AMDGPU::SI_ELSE);
      MachineOperand &NextExec = UseMI->getOperand(0);
      Register NextExecReg = NextExec.getReg();
      if (NextExec.isDead()) {
        assert(!SimpleIf);
        break;
      }
      UseMI = MRI->use_instr_nodbg_begin(NextExecReg);
    }
    if (UseMI->getOpcode() == AMDGPU::SI_END_CF) {
      if (hasKill(MI.getParent(), UseMI->getParent(), TII)) {
        NeedsKillCleanup.insert(&*UseMI);
        SimpleIf = false;
      }
    }
  } else if (SimpleIf) {
    // Check for SI_KILL_*_TERMINATOR on the path from if to endif.
    // If there is any such terminator, the simplification is not safe.
    auto UseMI = MRI->use_instr_nodbg_begin(SaveExecReg);
    SimpleIf = !hasKill(MI.getParent(), UseMI->getParent(), TII);
  }

  // Add an implicit def of exec to discourage scheduling VALU after this which
  // will interfere with trying to form s_and_saveexec_b64 later.
  Register CopyReg = SimpleIf ? SaveExecReg
                              : MRI->createVirtualRegister(BoolRC);
  MachineInstr *CopyExec =
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
          .addReg(Exec)
          .addReg(Exec, RegState::ImplicitDefine);
  LoweredIf.insert(CopyReg);

  Register Tmp = MRI->createVirtualRegister(BoolRC);

  MachineInstr *And =
      BuildMI(MBB, I, DL, TII->get(AndOpc), Tmp)
          .addReg(CopyReg)
          .add(Cond);

  setImpSCCDefDead(*And, true);

  MachineInstr *Xor = nullptr;
  if (!SimpleIf) {
    Xor =
        BuildMI(MBB, I, DL, TII->get(XorOpc), SaveExecReg)
            .addReg(Tmp)
            .addReg(CopyReg);
    setImpSCCDefDead(*Xor, ImpDefSCC.isDead());
  }

  // Use a copy that is a terminator to get correct spill code placement with
  // fast regalloc.
  MachineInstr *SetExec =
      BuildMI(MBB, I, DL, TII->get(MovTermOpc), Exec)
          .addReg(Tmp, RegState::Kill);

  // Skip ahead to the unconditional branch in case there are other terminators
  // present.
  I = skipToUncondBrOrEnd(MBB, I);

  // Insert the S_CBRANCH_EXECZ instruction which will be optimized later
  // during SIRemoveShortExecBranches.
  MachineInstr *NewBr = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
                            .add(MI.getOperand(2));

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->InsertMachineInstrInMaps(*CopyExec);

  // Replace with the AND so we don't need to fix the live interval for the
  // condition register.
  LIS->ReplaceMachineInstrInMaps(MI, *And);

  if (!SimpleIf)
    LIS->InsertMachineInstrInMaps(*Xor);
  LIS->InsertMachineInstrInMaps(*SetExec);
  LIS->InsertMachineInstrInMaps(*NewBr);

  LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
  MI.eraseFromParent();

  // FIXME: Is there a better way of adjusting the liveness? It shouldn't be
  // hard to add another def here but I'm not sure how to correctly update the
  // valno.
  LIS->removeInterval(SaveExecReg);
  LIS->createAndComputeVirtRegInterval(SaveExecReg);
  LIS->createAndComputeVirtRegInterval(Tmp);
  if (!SimpleIf)
    LIS->createAndComputeVirtRegInterval(CopyReg);
}

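// Lower SI_ELSE. A sketch of the emitted wave64 sequence (register names are
// illustrative):
//
//   %save = S_OR_SAVEEXEC_B64 %src   ; at block start, before phis/spills
//   ...
//   %dst  = S_AND_B64 $exec, %save   ; accounts for exec changes in between
//   $exec = S_XOR_B64_term $exec, %dst
//   S_CBRANCH_EXECZ %bb.end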
void SILowerControlFlow::emitElse(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  Register DstReg = MI.getOperand(0).getReg();

  MachineBasicBlock::iterator Start = MBB.begin();

  // This must be inserted before phis and any spill code inserted before the
  // else.
  Register SaveReg = MRI->createVirtualRegister(BoolRC);
  MachineInstr *OrSaveExec =
      BuildMI(MBB, Start, DL, TII->get(OrSaveExecOpc), SaveReg)
          .add(MI.getOperand(1)); // Saved EXEC

  MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();

  MachineBasicBlock::iterator ElsePt(MI);

  // This accounts for any modification of the EXEC mask within the block and
  // can be optimized out pre-RA when not required.
  MachineInstr *And = BuildMI(MBB, ElsePt, DL, TII->get(AndOpc), DstReg)
                          .addReg(Exec)
                          .addReg(SaveReg);

  if (LIS)
    LIS->InsertMachineInstrInMaps(*And);

  MachineInstr *Xor =
      BuildMI(MBB, ElsePt, DL, TII->get(XorTermrOpc), Exec)
          .addReg(Exec)
          .addReg(DstReg);

  // Skip ahead to the unconditional branch in case there are other terminators
  // present.
  ElsePt = skipToUncondBrOrEnd(MBB, ElsePt);

  MachineInstr *Branch =
      BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
          .addMBB(DestBB);

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->RemoveMachineInstrFromMaps(MI);
  MI.eraseFromParent();

  LIS->InsertMachineInstrInMaps(*OrSaveExec);

  LIS->InsertMachineInstrInMaps(*Xor);
  LIS->InsertMachineInstrInMaps(*Branch);

  LIS->removeInterval(DstReg);
  LIS->createAndComputeVirtRegInterval(DstReg);
  LIS->createAndComputeVirtRegInterval(SaveReg);

  // Let this be recomputed.
  LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
}

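// Lower SI_IF_BREAK. A sketch of the emitted wave64 sequence:
//
//   %and = S_AND_B64 $exec, %break-cond   ; omitted when the condition is
//                                         ; already masked by exec
//   %dst = S_OR_B64 %and, %accumulated-exit-mask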
void SILowerControlFlow::emitIfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  auto Dst = MI.getOperand(0).getReg();

  // Skip ANDing with exec if the break condition is already masked by exec
  // because it is a V_CMP in the same basic block. (We know the break
  // condition operand was an i1 in IR, so if it is a VALU instruction it must
  // be one with a carry-out.)
  bool SkipAnding = false;
  if (MI.getOperand(1).isReg()) {
    if (MachineInstr *Def = MRI->getUniqueVRegDef(MI.getOperand(1).getReg())) {
      SkipAnding = Def->getParent() == MI.getParent()
                   && SIInstrInfo::isVALU(*Def);
    }
  }

  // AND the break condition operand with exec, then OR that into the "loop
  // exit" mask.
  MachineInstr *And = nullptr, *Or = nullptr;
  if (!SkipAnding) {
    Register AndReg = MRI->createVirtualRegister(BoolRC);
    And = BuildMI(MBB, &MI, DL, TII->get(AndOpc), AndReg)
              .addReg(Exec)
              .add(MI.getOperand(1));
    Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
             .addReg(AndReg)
             .add(MI.getOperand(2));
    if (LIS)
      LIS->createAndComputeVirtRegInterval(AndReg);
  } else
    Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
             .add(MI.getOperand(1))
             .add(MI.getOperand(2));

  if (LIS) {
    if (And)
      LIS->InsertMachineInstrInMaps(*And);
    LIS->ReplaceMachineInstrInMaps(MI, *Or);
  }

  MI.eraseFromParent();
}

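// Lower SI_LOOP. A sketch of the emitted wave64 sequence:
//
//   $exec = S_ANDN2_B64_term $exec, %exit-mask
//   S_CBRANCH_EXECNZ %bb.loop-header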
void SILowerControlFlow::emitLoop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *AndN2 =
      BuildMI(MBB, &MI, DL, TII->get(Andn2TermOpc), Exec)
          .addReg(Exec)
          .add(MI.getOperand(0));

  auto BranchPt = skipToUncondBrOrEnd(MBB, MI.getIterator());
  MachineInstr *Branch =
      BuildMI(MBB, BranchPt, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
          .add(MI.getOperand(1));

  if (LIS) {
    LIS->ReplaceMachineInstrInMaps(MI, *AndN2);
    LIS->InsertMachineInstrInMaps(*Branch);
  }

  MI.eraseFromParent();
}

MachineBasicBlock::iterator
SILowerControlFlow::skipIgnoreExecInstsTrivialSucc(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {

  SmallSet<const MachineBasicBlock *, 4> Visited;
  MachineBasicBlock *B = &MBB;
  do {
    if (!Visited.insert(B).second)
      return MBB.end();

    auto E = B->end();
    for ( ; It != E; ++It) {
      if (It->getOpcode() == AMDGPU::SI_KILL_CLEANUP)
        continue;
      if (TII->mayReadEXEC(*MRI, *It))
        break;
    }

    if (It != E)
      return It;

    if (B->succ_size() != 1)
      return MBB.end();

    // If there is one trivial successor, advance to the next block.
    MachineBasicBlock *Succ = *B->succ_begin();

    It = Succ->begin();
    B = Succ;
  } while (true);
}

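// Lower SI_END_CF. A sketch of the emitted wave64 sequence:
//
//   $exec = S_OR_B64 $exec, %saved-mask   ; S_OR_B64_term instead when the
//                                         ; block is split for correct spill
//                                         ; placement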
MachineBasicBlock *SILowerControlFlow::emitEndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineBasicBlock::iterator InsPt = MBB.begin();

  // If we have instructions that aren't prolog instructions, split the block
  // and emit a terminator instruction. This ensures correct spill placement.
  // FIXME: We should unconditionally split the block here.
  bool NeedBlockSplit = false;
  Register DataReg = MI.getOperand(0).getReg();
  for (MachineBasicBlock::iterator I = InsPt, E = MI.getIterator();
       I != E; ++I) {
    if (I->modifiesRegister(DataReg, TRI)) {
      NeedBlockSplit = true;
      break;
    }
  }

  unsigned Opcode = OrOpc;
  MachineBasicBlock *SplitBB = &MBB;
  if (NeedBlockSplit) {
    SplitBB = MBB.splitAt(MI, /*UpdateLiveIns*/true, LIS);
    Opcode = OrTermrOpc;
    InsPt = MI;
  }

  MachineInstr *NewMI =
      BuildMI(MBB, InsPt, DL, TII->get(Opcode), Exec)
          .addReg(Exec)
          .add(MI.getOperand(0));

  LoweredEndCf.insert(NewMI);

  // If this ends control flow which contains kills (as flagged in emitIf)
  // then insert an SI_KILL_CLEANUP immediately following the exec mask
  // manipulation. This can be lowered to early termination if appropriate.
  MachineInstr *CleanUpMI = nullptr;
  if (NeedsKillCleanup.count(&MI))
    CleanUpMI = BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::SI_KILL_CLEANUP));

  if (LIS) {
    LIS->ReplaceMachineInstrInMaps(MI, *NewMI);
    if (CleanUpMI)
      LIS->InsertMachineInstrInMaps(*CleanUpMI);
  }

  MI.eraseFromParent();

  if (LIS)
    LIS->handleMove(*NewMI);
  return SplitBB;
}

// Returns the replacement operands for a logical operation: either a single
// operand (exec) or the two source operands if the source was another
// equivalent operation.
void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
       SmallVectorImpl<MachineOperand> &Src) const {
  MachineOperand &Op = MI.getOperand(OpNo);
  if (!Op.isReg() || !Op.getReg().isVirtual()) {
    Src.push_back(Op);
    return;
  }

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getParent() != MI.getParent() ||
      !(Def->isFullCopy() || (Def->getOpcode() == MI.getOpcode())))
    return;

  // Make sure we do not modify exec between def and use.
  // A copy with implicitly defined exec inserted earlier is an exclusion, it
  // does not really modify exec.
  for (auto I = Def->getIterator(); I != MI.getIterator(); ++I)
    if (I->modifiesRegister(AMDGPU::EXEC, TRI) &&
        !(I->isCopy() && I->getOperand(0).getReg() != Exec))
      return;

  for (const auto &SrcOp : Def->explicit_operands())
    if (SrcOp.isReg() && SrcOp.isUse() &&
        (SrcOp.getReg().isVirtual() || SrcOp.getReg() == Exec))
      Src.push_back(SrcOp);
}

// Search and combine pairs of equivalent instructions, like
// S_AND_B64 x, (S_AND_B64 x, y) => S_AND_B64 x, y
// S_OR_B64  x, (S_OR_B64 x, y)  => S_OR_B64 x, y
// One of the operands is the exec mask.
void SILowerControlFlow::combineMasks(MachineInstr &MI) {
  assert(MI.getNumExplicitOperands() == 3);
  SmallVector<MachineOperand, 4> Ops;
  unsigned OpToReplace = 1;
  findMaskOperands(MI, 1, Ops);
  if (Ops.size() == 1) OpToReplace = 2; // First operand can be exec or its copy
  findMaskOperands(MI, 2, Ops);
  if (Ops.size() != 3) return;

  unsigned UniqueOpndIdx;
  if (Ops[0].isIdenticalTo(Ops[1])) UniqueOpndIdx = 2;
  else if (Ops[0].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
  else if (Ops[1].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
  else return;

  Register Reg = MI.getOperand(OpToReplace).getReg();
  MI.RemoveOperand(OpToReplace);
  MI.addOperand(Ops[UniqueOpndIdx]);
  if (MRI->use_empty(Reg))
    MRI->getUniqueVRegDef(Reg)->eraseFromParent();
}

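// A sketch of the redundancy this removes (wave64, names illustrative):
//
//   $exec = S_OR_B64 $exec, %inner-saved  ; inner END_CF, erased below
//   $exec = S_OR_B64 $exec, %outer-saved  ; outer END_CF of an SI_IF
//
// The outer restore of the SI_IF's saved mask subsumes the inner one.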
void SILowerControlFlow::optimizeEndCf() {
  // If the only instruction immediately following this END_CF is another
  // END_CF in the only successor we can avoid emitting exec mask restore here.
  if (!RemoveRedundantEndcf)
    return;

  for (MachineInstr *MI : LoweredEndCf) {
    MachineBasicBlock &MBB = *MI->getParent();
    auto Next =
        skipIgnoreExecInstsTrivialSucc(MBB, std::next(MI->getIterator()));
    if (Next == MBB.end() || !LoweredEndCf.count(&*Next))
      continue;
    // Only skip the inner END_CF if the outer END_CF belongs to an SI_IF.
    // If it belongs to an SI_ELSE then the saved mask has an inverted value.
    Register SavedExec
        = TII->getNamedOperand(*Next, AMDGPU::OpName::src1)->getReg();
    assert(SavedExec.isVirtual() && "Expected saved exec to be src1!");

    const MachineInstr *Def = MRI->getUniqueVRegDef(SavedExec);
    if (Def && LoweredIf.count(SavedExec)) {
      LLVM_DEBUG(dbgs() << "Skip redundant "; MI->dump());
      if (LIS)
        LIS->RemoveMachineInstrFromMaps(*MI);
      MI->eraseFromParent();
      removeMBBifRedundant(MBB);
    }
  }
}

MachineBasicBlock *SILowerControlFlow::process(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineBasicBlock::iterator I(MI);
  MachineInstr *Prev = (I != MBB.begin()) ? &*(std::prev(I)) : nullptr;

  MachineBasicBlock *SplitBB = &MBB;

  switch (MI.getOpcode()) {
  case AMDGPU::SI_IF:
    emitIf(MI);
    break;

  case AMDGPU::SI_ELSE:
    emitElse(MI);
    break;

  case AMDGPU::SI_IF_BREAK:
    emitIfBreak(MI);
    break;

  case AMDGPU::SI_LOOP:
    emitLoop(MI);
    break;

  case AMDGPU::SI_END_CF:
    SplitBB = emitEndCf(MI);
    break;

  default:
    assert(false && "Attempt to process unsupported instruction");
    break;
  }

  MachineBasicBlock::iterator Next;
  for (I = Prev ? Prev->getIterator() : MBB.begin(); I != MBB.end(); I = Next) {
    Next = std::next(I);
    MachineInstr &MaskMI = *I;
    switch (MaskMI.getOpcode()) {
    case AMDGPU::S_AND_B64:
    case AMDGPU::S_OR_B64:
    case AMDGPU::S_AND_B32:
    case AMDGPU::S_OR_B32:
      // Cleanup bit manipulations on exec mask
      combineMasks(MaskMI);
      break;
    default:
      I = MBB.end();
      break;
    }
  }

  return SplitBB;
}

void SILowerControlFlow::lowerInitExec(MachineBasicBlock *MBB,
                                       MachineInstr &MI) {
  MachineFunction &MF = *MBB->getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  bool IsWave32 = ST.isWave32();

  if (MI.getOpcode() == AMDGPU::SI_INIT_EXEC) {
    // This should be before all vector instructions.
    BuildMI(*MBB, MBB->begin(), MI.getDebugLoc(),
            TII->get(IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64), Exec)
        .addImm(MI.getOperand(0).getImm());
    if (LIS)
      LIS->RemoveMachineInstrFromMaps(MI);
    MI.eraseFromParent();
    return;
  }

  // Extract the thread count from an SGPR input and set EXEC accordingly.
  // Since BFM can't shift by 64, handle that case with CMP + CMOV.
  //
  // S_BFE_U32 count, input, {shift, 7}
  // S_BFM_B64 exec, count, 0
  // S_CMP_EQ_U32 count, 64
  // S_CMOV_B64 exec, -1
  Register InputReg = MI.getOperand(0).getReg();
  MachineInstr *FirstMI = &*MBB->begin();
  if (InputReg.isVirtual()) {
    MachineInstr *DefInstr = MRI->getVRegDef(InputReg);
    assert(DefInstr && DefInstr->isCopy());
    if (DefInstr->getParent() == MBB) {
      if (DefInstr != FirstMI) {
        // If the `InputReg` is defined in the current block, we also need to
        // move that instruction to the beginning of the block.
        DefInstr->removeFromParent();
        MBB->insert(FirstMI, DefInstr);
        if (LIS)
          LIS->handleMove(*DefInstr);
      } else {
        // If the first instruction is the definition then move the pointer
        // after it.
        FirstMI = &*std::next(FirstMI->getIterator());
      }
    }
  }

  // Insert instruction sequence at block beginning (before vector operations).
  const DebugLoc DL = MI.getDebugLoc();
  const unsigned WavefrontSize = ST.getWavefrontSize();
  const unsigned Mask = (WavefrontSize << 1) - 1;
  Register CountReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
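  // The S_BFE_U32 src1 immediate encodes the bitfield offset in its low bits
  // and the width in bits [22:16]; 0x70000 therefore requests a 7-bit field,
  // wide enough for a thread count of up to 64.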
  auto BfeMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_BFE_U32), CountReg)
                   .addReg(InputReg)
                   .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
  auto BfmMI =
      BuildMI(*MBB, FirstMI, DL,
              TII->get(IsWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64), Exec)
          .addReg(CountReg)
          .addImm(0);
  auto CmpMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_CMP_EQ_U32))
                   .addReg(CountReg, RegState::Kill)
                   .addImm(WavefrontSize);
  auto CmovMI =
      BuildMI(*MBB, FirstMI, DL,
              TII->get(IsWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
              Exec)
          .addImm(-1);

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->RemoveMachineInstrFromMaps(MI);
  MI.eraseFromParent();

  LIS->InsertMachineInstrInMaps(*BfeMI);
  LIS->InsertMachineInstrInMaps(*BfmMI);
  LIS->InsertMachineInstrInMaps(*CmpMI);
  LIS->InsertMachineInstrInMaps(*CmovMI);

  LIS->removeInterval(InputReg);
  LIS->createAndComputeVirtRegInterval(InputReg);
  LIS->createAndComputeVirtRegInterval(CountReg);
}

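// Erase \p MBB if it contains nothing but debug instructions and an
// unconditional branch, rerouting its predecessors to the single successor
// and preserving fallthrough by splicing blocks or inserting an S_BRANCH.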
bool SILowerControlFlow::removeMBBifRedundant(MachineBasicBlock &MBB) {
  auto GetFallThroughSucc = [=](MachineBasicBlock *B) -> MachineBasicBlock * {
    auto *S = B->getNextNode();
    if (!S)
      return nullptr;
    if (B->isSuccessor(S)) {
      // The only fallthrough candidate
      MachineBasicBlock::iterator I(B->getFirstInstrTerminator());
      MachineBasicBlock::iterator E = B->end();
      for (; I != E; I++) {
        if (I->isBranch() && TII->getBranchDestBlock(*I) == S)
          // We have unoptimized branch to layout successor
          return nullptr;
      }
    }
    return S;
  };

  for (auto &I : MBB.instrs()) {
    if (!I.isDebugInstr() && !I.isUnconditionalBranch())
      return false;
  }

  assert(MBB.succ_size() == 1 && "MBB has more than one successor");

  MachineBasicBlock *Succ = *MBB.succ_begin();
  MachineBasicBlock *FallThrough = nullptr;

  while (!MBB.predecessors().empty()) {
    MachineBasicBlock *P = *MBB.pred_begin();
    if (GetFallThroughSucc(P) == &MBB)
      FallThrough = P;
    P->ReplaceUsesOfBlockWith(&MBB, Succ);
  }
  MBB.removeSuccessor(Succ);
  if (LIS) {
    for (auto &I : MBB.instrs())
      LIS->RemoveMachineInstrFromMaps(I);
  }
  MBB.clear();
  MBB.eraseFromParent();
  if (FallThrough && !FallThrough->isLayoutSuccessor(Succ)) {
    if (!GetFallThroughSucc(Succ)) {
      MachineFunction *MF = FallThrough->getParent();
      MachineFunction::iterator FallThroughPos(FallThrough);
      MF->splice(std::next(FallThroughPos), Succ);
    } else
      BuildMI(*FallThrough, FallThrough->end(),
              FallThrough->findBranchDebugLoc(), TII->get(AMDGPU::S_BRANCH))
          .addMBB(Succ);
  }

  return true;
}

bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  // This doesn't actually need LiveIntervals, but we can preserve them.
  LIS = getAnalysisIfAvailable<LiveIntervals>();
  MRI = &MF.getRegInfo();
  BoolRC = TRI->getBoolRC();
  InsertKillCleanups =
      MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS;

  if (ST.isWave32()) {
    AndOpc = AMDGPU::S_AND_B32;
    OrOpc = AMDGPU::S_OR_B32;
    XorOpc = AMDGPU::S_XOR_B32;
    MovTermOpc = AMDGPU::S_MOV_B32_term;
    Andn2TermOpc = AMDGPU::S_ANDN2_B32_term;
    XorTermrOpc = AMDGPU::S_XOR_B32_term;
    OrTermrOpc = AMDGPU::S_OR_B32_term;
    OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B32;
    Exec = AMDGPU::EXEC_LO;
  } else {
    AndOpc = AMDGPU::S_AND_B64;
    OrOpc = AMDGPU::S_OR_B64;
    XorOpc = AMDGPU::S_XOR_B64;
    MovTermOpc = AMDGPU::S_MOV_B64_term;
    Andn2TermOpc = AMDGPU::S_ANDN2_B64_term;
    XorTermrOpc = AMDGPU::S_XOR_B64_term;
    OrTermrOpc = AMDGPU::S_OR_B64_term;
    OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B64;
    Exec = AMDGPU::EXEC;
  }

  SmallVector<MachineInstr *, 32> Worklist;

  MachineFunction::iterator NextBB;
  for (MachineFunction::iterator BI = MF.begin();
       BI != MF.end(); BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock *MBB = &*BI;

    MachineBasicBlock::iterator I, E, Next;
    E = MBB->end();
    for (I = MBB->begin(); I != E; I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;
      MachineBasicBlock *SplitMBB = MBB;

      switch (MI.getOpcode()) {
      case AMDGPU::SI_IF:
        SplitMBB = process(MI);
        break;

      case AMDGPU::SI_ELSE:
      case AMDGPU::SI_IF_BREAK:
      case AMDGPU::SI_LOOP:
      case AMDGPU::SI_END_CF:
        // Only build worklist if SI_IF instructions must be processed first.
        if (InsertKillCleanups)
          Worklist.push_back(&MI);
        else
          SplitMBB = process(MI);
        break;

      // FIXME: find a better place for this
      case AMDGPU::SI_INIT_EXEC:
      case AMDGPU::SI_INIT_EXEC_FROM_INPUT:
        lowerInitExec(MBB, MI);
        if (LIS)
          LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
        break;

      default:
        break;
      }

      if (SplitMBB != MBB) {
        MBB = Next->getParent();
        E = MBB->end();
      }
    }
  }

  for (MachineInstr *MI : Worklist)
    process(*MI);

  optimizeEndCf();

  LoweredEndCf.clear();
  LoweredIf.clear();
  NeedsKillCleanup.clear();

  return true;
}