//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass removes redundant S_OR_B64 instructions enabling lanes in
/// the exec mask. If two SI_END_CF instructions (lowered as S_OR_B64) come
/// together without any vector instructions between them, we only need to
/// keep the outer SI_END_CF: because the CFG is structured, the exec mask
/// of the outer end-cf statement is always a superset of the exec mask of
/// the inner one.
///
/// This needs to be done before register allocation to eliminate the saved
/// exec mask registers, but after the register coalescer so that there are
/// no vector register copies between the different end-cf statements.
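///
/// For example (an illustrative wave64 MIR sketch; the register names are
/// invented for this comment):
///
///   $exec = S_OR_B64 $exec, %inner_saved, implicit-def $scc  ; inner end cf
///   $exec = S_OR_B64 $exec, %outer_saved, implicit-def $scc  ; outer end cf
///
/// The inner S_OR_B64 can be erased: the outer one re-enables every lane
/// the inner one would have re-enabled.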
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"
namespace {

class SIOptimizeExecMaskingPreRA : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;
  MachineRegisterInfo *MRI;

public:
  MachineBasicBlock::iterator skipIgnoreExecInsts(
    MachineBasicBlock::iterator I, MachineBasicBlock::iterator E) const;

  MachineBasicBlock::iterator skipIgnoreExecInstsTrivialSucc(
    MachineBasicBlock *&MBB, MachineBasicBlock::iterator It) const;

  static char ID;

  SIOptimizeExecMaskingPreRA() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPreRAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations pre-RA";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervals>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                      "SI optimize exec mask operations pre-RA", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                    "SI optimize exec mask operations pre-RA", false, false)

char SIOptimizeExecMaskingPreRA::ID = 0;

char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRA::ID;

FunctionPass *llvm::createSIOptimizeExecMaskingPreRAPass() {
  return new SIOptimizeExecMaskingPreRA();
}

static bool isEndCF(const MachineInstr &MI, const SIRegisterInfo *TRI,
                    const GCNSubtarget &ST) {
  if (ST.isWave32()) {
    return MI.getOpcode() == AMDGPU::S_OR_B32 &&
           MI.modifiesRegister(AMDGPU::EXEC_LO, TRI);
  }

  return MI.getOpcode() == AMDGPU::S_OR_B64 &&
         MI.modifiesRegister(AMDGPU::EXEC, TRI);
}

static bool isFullExecCopy(const MachineInstr &MI, const GCNSubtarget &ST) {
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

  if (MI.isCopy() && MI.getOperand(1).getReg() == Exec) {
    assert(MI.isFullCopy());
    return true;
  }

  return false;
}

static unsigned getOrNonExecReg(const MachineInstr &MI,
                                const SIInstrInfo &TII,
                                const GCNSubtarget &ST) {
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  auto Op = TII.getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Op->isReg() && Op->getReg() != Exec)
    return Op->getReg();
  Op = TII.getNamedOperand(MI, AMDGPU::OpName::src0);
  if (Op->isReg() && Op->getReg() != Exec)
    return Op->getReg();
  return AMDGPU::NoRegister;
}

static MachineInstr *getOrExecSource(const MachineInstr &MI,
                                     const SIInstrInfo &TII,
                                     const MachineRegisterInfo &MRI,
                                     const GCNSubtarget &ST) {
  auto SavedExec = getOrNonExecReg(MI, TII, ST);
  if (SavedExec == AMDGPU::NoRegister)
    return nullptr;
  auto SaveExecInst = MRI.getUniqueVRegDef(SavedExec);
  if (!SaveExecInst || !isFullExecCopy(*SaveExecInst, ST))
    return nullptr;
  return SaveExecInst;
}

/// Skip over instructions that don't care about the exec mask.
MachineBasicBlock::iterator SIOptimizeExecMaskingPreRA::skipIgnoreExecInsts(
  MachineBasicBlock::iterator I, MachineBasicBlock::iterator E) const {
  for ( ; I != E; ++I) {
    if (TII->mayReadEXEC(*MRI, *I))
      break;
  }

  return I;
}

// Skip to the next instruction, ignoring debug instructions and trivial
// block boundaries (blocks that have one (typically fallthrough) successor,
// and the successor has one predecessor).
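//
// For example (a hypothetical CFG, not from a real test): if the scan
// reaches bb.1->end(), bb.1's single successor is bb.2, bb.2's single
// predecessor is bb.1, and bb.2 is bb.1's layout successor, the scan
// continues from bb.2->begin().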
MachineBasicBlock::iterator
SIOptimizeExecMaskingPreRA::skipIgnoreExecInstsTrivialSucc(
  MachineBasicBlock *&MBB,
  MachineBasicBlock::iterator It) const {

  do {
    It = skipIgnoreExecInsts(It, MBB->end());
    if (It != MBB->end() || MBB->succ_size() != 1)
      break;

    // If there is one trivial successor, advance to the next block.
    MachineBasicBlock *Succ = *MBB->succ_begin();

    // TODO: Is this really necessary?
    if (!MBB->isLayoutSuccessor(Succ))
      break;

    It = Succ->begin();
    MBB = Succ;
  } while (true);

  return It;
}

// Optimize sequence
//    %sel = V_CNDMASK_B32_e64 0, 1, %cc
//    %cmp = V_CMP_NE_U32 1, %sel
//    $vcc = S_AND_B64 $exec, %cmp
//    S_CBRANCH_VCC[N]Z
// =>
//    $vcc = S_ANDN2_B64 $exec, %cc
//    S_CBRANCH_VCC[N]Z
//
// This is the negation pattern inserted by DAGCombiner::visitBRCOND() in
// rebuildSetCC(). We start the match at the S_CBRANCH to avoid an exhaustive
// search, but only the first three instructions are really needed. The
// S_AND_B64 with exec is a required part of the pattern since V_CNDMASK_B32
// writes zeroes for inactive lanes.
//
// Returns the %cc register on success.
static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
                                     const GCNSubtarget &ST,
                                     MachineRegisterInfo &MRI,
                                     LiveIntervals *LIS) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  bool Wave32 = ST.isWave32();
  const unsigned AndOpc = Wave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  const unsigned Andn2Opc = Wave32 ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_ANDN2_B64;
  const unsigned CondReg = Wave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
  const unsigned ExecReg = Wave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

  auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
                           unsigned Opc = MI.getOpcode();
                           return Opc == AMDGPU::S_CBRANCH_VCCZ ||
                                  Opc == AMDGPU::S_CBRANCH_VCCNZ; });
  if (I == MBB.terminators().end())
    return AMDGPU::NoRegister;

  auto *And = TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister,
                                   *I, MRI, LIS);
  if (!And || And->getOpcode() != AndOpc ||
      !And->getOperand(1).isReg() || !And->getOperand(2).isReg())
    return AMDGPU::NoRegister;

  MachineOperand *AndCC = &And->getOperand(1);
  Register CmpReg = AndCC->getReg();
  unsigned CmpSubReg = AndCC->getSubReg();
  if (CmpReg == ExecReg) {
    AndCC = &And->getOperand(2);
    CmpReg = AndCC->getReg();
    CmpSubReg = AndCC->getSubReg();
  } else if (And->getOperand(2).getReg() != ExecReg) {
    return AMDGPU::NoRegister;
  }

  auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, MRI, LIS);
  if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
                Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
      Cmp->getParent() != And->getParent())
    return AMDGPU::NoRegister;

  MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
  MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
  if (Op1->isImm() && Op2->isReg())
    std::swap(Op1, Op2);
  if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  Register SelReg = Op1->getReg();
  auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, MRI, LIS);
  if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
    return AMDGPU::NoRegister;

  if (TII->hasModifiersSet(*Sel, AMDGPU::OpName::src0_modifiers) ||
      TII->hasModifiersSet(*Sel, AMDGPU::OpName::src1_modifiers))
    return AMDGPU::NoRegister;

  Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
  Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
  MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
  if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
      Op1->getImm() != 0 || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t' << *Cmp << '\t'
                    << *And);

  Register CCReg = CC->getReg();
  LIS->RemoveMachineInstrFromMaps(*And);
  MachineInstr *Andn2 =
      BuildMI(MBB, *And, And->getDebugLoc(), TII->get(Andn2Opc),
              And->getOperand(0).getReg())
          .addReg(ExecReg)
          .addReg(CCReg, getUndefRegState(CC->isUndef()), CC->getSubReg());
  And->eraseFromParent();
  LIS->InsertMachineInstrInMaps(*Andn2);

  LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');

  // Try to remove the compare. If the cmp value is VCC, it must not be used
  // between the cmp and the s_and; if it is any other register, it must be
  // entirely unused.
  if ((Register::isVirtualRegister(CmpReg) && MRI.use_nodbg_empty(CmpReg)) ||
      (CmpReg == CondReg &&
       std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
                    [&](const MachineInstr &MI) {
                      return MI.readsRegister(CondReg, TRI);
                    }))) {
    LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');

    LIS->RemoveMachineInstrFromMaps(*Cmp);
    Cmp->eraseFromParent();

    // Try to remove v_cndmask_b32.
    if (Register::isVirtualRegister(SelReg) && MRI.use_nodbg_empty(SelReg)) {
      LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');

      LIS->RemoveMachineInstrFromMaps(*Sel);
      Sel->eraseFromParent();
    }
  }

  return CCReg;
}

bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();
  MRI = &MF.getRegInfo();

  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
  DenseSet<unsigned> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {

    if (unsigned Reg = optimizeVcndVcmpPair(MBB, ST, *MRI, LIS)) {
      RecalcRegs.insert(Reg);
      RecalcRegs.insert(AMDGPU::VCC_LO);
      RecalcRegs.insert(AMDGPU::VCC_HI);
      RecalcRegs.insert(AMDGPU::SCC);
      Changed = true;
    }

    // Try to remove unneeded instructions before s_endpgm.
    if (MBB.succ_empty()) {
      if (MBB.empty())
        continue;

      // Skip this if the endpgm has any implicit uses; otherwise we would
      // need to be careful to update or remove them. S_ENDPGM always has a
      // single immediate operand that is not used for anything other than
      // ending up in the encoding.
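      //
      // A hypothetical sketch of what gets erased (invented MIR, not from a
      // real test): a trailing load whose result can no longer be observed.
      //
      //   %v:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET ...  ; erased by the loop
      //   S_ENDPGM 0                                 ; kept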
      MachineInstr &Term = MBB.back();
      if (Term.getOpcode() != AMDGPU::S_ENDPGM || Term.getNumOperands() != 1)
        continue;

      SmallVector<MachineBasicBlock*, 4> Blocks({&MBB});

      while (!Blocks.empty()) {
        auto CurBB = Blocks.pop_back_val();
        auto I = CurBB->rbegin(), E = CurBB->rend();
        if (I != E) {
          if (I->isUnconditionalBranch() || I->getOpcode() == AMDGPU::S_ENDPGM)
            ++I;
          else if (I->isBranch())
            continue;
        }

        while (I != E) {
          if (I->isDebugInstr()) {
            I = std::next(I);
            continue;
          }

          if (I->mayStore() || I->isBarrier() || I->isCall() ||
              I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
            break;

          LLVM_DEBUG(dbgs()
                     << "Removing no-effect instruction: " << *I << '\n');

          for (auto &Op : I->operands()) {
            if (Op.isReg())
              RecalcRegs.insert(Op.getReg());
          }

          auto Next = std::next(I);
          LIS->RemoveMachineInstrFromMaps(*I);
          I->eraseFromParent();
          I = Next;

          Changed = true;
        }

        if (I != E)
          continue;

        // Try to ascend predecessors.
        for (auto *Pred : CurBB->predecessors()) {
          if (Pred->succ_size() == 1)
            Blocks.push_back(Pred);
        }
      }
      continue;
    }

    // Try to collapse adjacent endifs.
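    //
    // Illustrative shape being matched (wave64 MIR sketch; names invented
    // for this comment):
    //
    //   %saved:sreg_64 = COPY $exec
    //   ...
    //   $exec = S_OR_B64 $exec, %inner, implicit-def $scc  ; Lead (erased)
    //   $exec = S_OR_B64 $exec, %saved, implicit-def $scc  ; NextLead (kept)
    //
    // NextLead's saved-mask operand must come from a full copy of exec
    // (see getOrExecSource) for the collapse to be safe.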
    auto E = MBB.end();
    auto Lead = skipDebugInstructionsForward(MBB.begin(), E);
    if (MBB.succ_size() != 1 || Lead == E || !isEndCF(*Lead, TRI, ST))
      continue;

    MachineBasicBlock *TmpMBB = &MBB;
    auto NextLead = skipIgnoreExecInstsTrivialSucc(TmpMBB, std::next(Lead));
    if (NextLead == TmpMBB->end() || !isEndCF(*NextLead, TRI, ST) ||
        !getOrExecSource(*NextLead, *TII, *MRI, ST))
      continue;

    LLVM_DEBUG(dbgs() << "Redundant EXEC = S_OR_B64 found: " << *Lead << '\n');

    auto SaveExec = getOrExecSource(*Lead, *TII, *MRI, ST);
    unsigned SaveExecReg = getOrNonExecReg(*Lead, *TII, ST);
    for (auto &Op : Lead->operands()) {
      if (Op.isReg())
        RecalcRegs.insert(Op.getReg());
    }

    LIS->RemoveMachineInstrFromMaps(*Lead);
    Lead->eraseFromParent();
    if (SaveExecReg) {
      LIS->removeInterval(SaveExecReg);
      LIS->createAndComputeVirtRegInterval(SaveExecReg);
    }

    Changed = true;

    // If the saved exec value feeding the erased instruction was produced by
    // a full copy of exec and is only used within the copy's own block, fold
    // the copy now and use exec directly.
    if (!SaveExec || !SaveExec->isFullCopy())
      continue;

    Register SavedExec = SaveExec->getOperand(0).getReg();
    bool SafeToReplace = true;
    for (auto &U : MRI->use_nodbg_instructions(SavedExec)) {
      if (U.getParent() != SaveExec->getParent()) {
        SafeToReplace = false;
        break;
      }

      LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *SaveExec << '\n');
    }

    if (SafeToReplace) {
      LIS->RemoveMachineInstrFromMaps(*SaveExec);
      SaveExec->eraseFromParent();
      MRI->replaceRegWith(SavedExec, Exec);
      LIS->removeInterval(SavedExec);
    }
  }

  if (Changed) {
    for (auto Reg : RecalcRegs) {
      if (Register::isVirtualRegister(Reg)) {
        LIS->removeInterval(Reg);
        if (!MRI->reg_empty(Reg))
          LIS->createAndComputeVirtRegInterval(Reg);
      } else {
        LIS->removeAllRegUnitsForPhysReg(Reg);
      }
    }
  }

  return Changed;
}