//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers all occurrences of i1 values (with a vreg_1 register class)
// to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA
// form and a wave-level control flow graph.
//
// Before this pass, values that are semantically i1 and are defined and used
// within the same basic block are already represented as lane masks in scalar
// registers. However, values that cross basic blocks are always transferred
// between basic blocks in vreg_1 virtual registers and are lowered by this
// pass.
//
// The only instructions that use or define vreg_1 virtual registers are COPY,
// PHI, and IMPLICIT_DEF.
//
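// For example (an illustrative sketch, not literal pass output): a value
// transferred between blocks as
//
//   %b:vreg_1 = COPY %a
//   ...
//   %c:vreg_1 = PHI %b, %bb.1, ...
//
// is rewritten so that %b and %c become scalar lane masks (SReg_32 in wave32,
// SReg_64 in wave64), and merges across divergent control flow are built from
// S_AND / S_OR / S_ANDN2 operations on EXEC.
//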
//===----------------------------------------------------------------------===//

#include "SILowerI1Copies.h"
#include "AMDGPU.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/InitializePasses.h"
#include "llvm/Target/CGPassBuilderOption.h"

#define DEBUG_TYPE "si-i1-copies"

using namespace llvm;

static Register insertUndefLaneMask(MachineBasicBlock *MBB,
                                    MachineRegisterInfo *MRI,
                                    Register LaneMaskRegAttrs);

namespace {

class SILowerI1Copies : public MachineFunctionPass {
public:
  static char ID;

  SILowerI1Copies() : MachineFunctionPass(ID) {
    initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Lower i1 Copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

class Vreg1LoweringHelper : public PhiLoweringHelper {
public:
  Vreg1LoweringHelper(MachineFunction *MF, MachineDominatorTree *DT,
                      MachinePostDominatorTree *PDT);

private:
  DenseSet<Register> ConstrainRegs;

public:
  void markAsLaneMask(Register DstReg) const override;
  void getCandidatesForLowering(
      SmallVectorImpl<MachineInstr *> &Vreg1Phis) const override;
  void collectIncomingValuesFromPhi(
      const MachineInstr *MI,
      SmallVectorImpl<Incoming> &Incomings) const override;
  void replaceDstReg(Register NewReg, Register OldReg,
                     MachineBasicBlock *MBB) override;
  void buildMergeLaneMasks(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
                           Register DstReg, Register PrevReg,
                           Register CurReg) override;
  void constrainIncomingRegisterTakenAsIs(Incoming &In) override;

  bool lowerCopiesFromI1();
  bool lowerCopiesToI1();
  bool cleanConstrainRegs(bool Changed);
  bool isVreg1(Register Reg) const {
    return Reg.isVirtual() && MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
  }
};

Vreg1LoweringHelper::Vreg1LoweringHelper(MachineFunction *MF,
                                         MachineDominatorTree *DT,
                                         MachinePostDominatorTree *PDT)
    : PhiLoweringHelper(MF, DT, PDT) {}

bool Vreg1LoweringHelper::cleanConstrainRegs(bool Changed) {
  assert(Changed || ConstrainRegs.empty());
  for (Register Reg : ConstrainRegs)
    MRI->constrainRegClass(Reg, &AMDGPU::SReg_1_XEXECRegClass);
  ConstrainRegs.clear();

  return Changed;
}

/// Helper class that analyzes the relationship between incoming values of a
/// phi in the control flow graph to determine where an incoming value can
/// simply be taken as a scalar lane mask as-is, and where it needs to be
/// merged with another, previously defined lane mask.
///
/// The approach is as follows:
///  - Determine all basic blocks which, starting from the incoming blocks,
///    a wave may reach before entering the def block (the block containing the
///    phi).
///  - If an incoming block has no predecessors in this set, we can take the
///    incoming value as a scalar lane mask as-is.
///  -- A special case of this is when the def block has a self-loop.
///  - Otherwise, the incoming value needs to be merged with a previously
///    defined lane mask.
///  - If there is a path into the set of reachable blocks that does _not_ go
///    through an incoming block where we can take the scalar lane mask as-is,
///    we need to invent an available value for the SSAUpdater. Choices are
///    0 and undef, with differing consequences for how to merge values etc.
///
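/// For example (an illustrative sketch): for a phi in def block D with
/// incomings from blocks A and B, where A ends in a divergent branch to B and
/// D, the set of blocks a wave may reach before entering D contains both A
/// and B. A has no predecessor in that set, so its incoming value is taken
/// as-is; B's predecessor A is in the set, so B's incoming value is merged
/// with a previously defined lane mask.
///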
/// TODO: We could use region analysis to quickly skip over SESE regions during
/// the traversal.
///
class PhiIncomingAnalysis {
  MachinePostDominatorTree &PDT;
  const SIInstrInfo *TII;

  // For each reachable basic block, whether it is a source in the induced
  // subgraph of the CFG.
  DenseMap<MachineBasicBlock *, bool> ReachableMap;
  SmallVector<MachineBasicBlock *, 4> ReachableOrdered;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> Predecessors;

public:
  PhiIncomingAnalysis(MachinePostDominatorTree &PDT, const SIInstrInfo *TII)
      : PDT(PDT), TII(TII) {}

  /// Returns whether \p MBB is a source in the induced subgraph of reachable
  /// blocks.
  bool isSource(MachineBasicBlock &MBB) const {
    return ReachableMap.find(&MBB)->second;
  }

  ArrayRef<MachineBasicBlock *> predecessors() const { return Predecessors; }

  void analyze(MachineBasicBlock &DefBlock, ArrayRef<Incoming> Incomings) {
    assert(Stack.empty());
    ReachableMap.clear();
    ReachableOrdered.clear();
    Predecessors.clear();

    // Insert the def block first, so that it acts as an end point for the
    // traversal.
    ReachableMap.try_emplace(&DefBlock, false);
    ReachableOrdered.push_back(&DefBlock);

    for (auto Incoming : Incomings) {
      MachineBasicBlock *MBB = Incoming.Block;
      if (MBB == &DefBlock) {
        ReachableMap[&DefBlock] = true; // self-loop on DefBlock
        continue;
      }

      ReachableMap.try_emplace(MBB, false);
      ReachableOrdered.push_back(MBB);

      // If this block has a divergent terminator and the def block is its
      // post-dominator, the wave may first visit the other successors.
      if (TII->hasDivergentBranch(MBB) && PDT.dominates(&DefBlock, MBB))
        append_range(Stack, MBB->successors());
    }

    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!ReachableMap.try_emplace(MBB, false).second)
        continue;
      ReachableOrdered.push_back(MBB);

      append_range(Stack, MBB->successors());
    }

    for (MachineBasicBlock *MBB : ReachableOrdered) {
      bool HaveReachablePred = false;
      for (MachineBasicBlock *Pred : MBB->predecessors()) {
        if (ReachableMap.count(Pred)) {
          HaveReachablePred = true;
        } else {
          Stack.push_back(Pred);
        }
      }
      if (!HaveReachablePred)
        ReachableMap[MBB] = true;
      if (HaveReachablePred) {
        for (MachineBasicBlock *UnreachablePred : Stack) {
          if (!llvm::is_contained(Predecessors, UnreachablePred))
            Predecessors.push_back(UnreachablePred);
        }
      }
      Stack.clear();
    }
  }
};

/// Helper class that detects loops which require us to lower an i1 COPY into
/// bitwise manipulation.
///
/// Unfortunately, we cannot use LoopInfo because LoopInfo does not distinguish
/// between loops with the same header. Consider this example:
///
///  A-+-+
///  | | |
///  B-+ |
///  |   |
///  C---+
///
/// A is the header of a loop containing A, B, and C as far as LoopInfo is
/// concerned. However, an i1 COPY in B that is used in C must be lowered to
/// bitwise operations to combine results from different loop iterations when
/// B has a divergent branch (since by default we will compile this code such
/// that threads in a wave are merged at the entry of C).
///
/// The following rule is implemented to determine whether bitwise operations
/// are required: use the bitwise lowering for a def in block B if a backward
/// edge to B is reachable without going through the nearest common
/// post-dominator of B and all uses of the def.
///
/// TODO: This rule is conservative because it does not check whether the
/// relevant branches are actually divergent.
///
/// The class is designed to cache the CFG traversal so that it can be re-used
/// for multiple defs within the same basic block.
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
/// the traversal.
///
class LoopFinder {
  MachineDominatorTree &DT;
  MachinePostDominatorTree &PDT;

  // All visited / reachable blocks, tagged by level (level 0 is the def block,
  // level 1 are all blocks reachable including but not going through the def
  // block's IPDOM, etc.).
  DenseMap<MachineBasicBlock *, unsigned> Visited;

  // Nearest common dominator of all visited blocks by level (level 0 is the
  // def block). Used for seeding the SSAUpdater.
  SmallVector<MachineBasicBlock *, 4> CommonDominators;

  // Post-dominator of all visited blocks.
  MachineBasicBlock *VisitedPostDom = nullptr;

  // Level at which a loop was found: 0 is not possible; 1 = a backward edge is
  // reachable without going through the IPDOM of the def block (if the IPDOM
  // itself has an edge to the def block, the loop level is 2), etc.
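  //
  // E.g., a def block with a self-loop is found at level 1: the backward edge
  // is reached before the def block's IPDOM is ever crossed.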
  unsigned FoundLoopLevel = ~0u;

  MachineBasicBlock *DefBlock = nullptr;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> NextLevel;

public:
  LoopFinder(MachineDominatorTree &DT, MachinePostDominatorTree &PDT)
      : DT(DT), PDT(PDT) {}

  void initialize(MachineBasicBlock &MBB) {
    Visited.clear();
    CommonDominators.clear();
    Stack.clear();
    NextLevel.clear();
    VisitedPostDom = nullptr;
    FoundLoopLevel = ~0u;

    DefBlock = &MBB;
  }

  /// Check whether a backward edge can be reached without going through the
  /// given \p PostDom of the def block.
  ///
  /// Return the level of \p PostDom if a loop was found, or 0 otherwise.
  unsigned findLoop(MachineBasicBlock *PostDom) {
    MachineDomTreeNode *PDNode = PDT.getNode(DefBlock);

    if (!VisitedPostDom)
      advanceLevel();

    unsigned Level = 0;
    while (PDNode->getBlock() != PostDom) {
      if (PDNode->getBlock() == VisitedPostDom)
        advanceLevel();
      PDNode = PDNode->getIDom();
      Level++;
      if (FoundLoopLevel == Level)
        return Level;
    }

    return 0;
  }

  /// Add undef values dominating the loop and the optionally given additional
  /// blocks, so that the SSA updater doesn't have to search all the way to the
  /// function entry.
  void addLoopEntries(unsigned LoopLevel, MachineSSAUpdater &SSAUpdater,
                      MachineRegisterInfo &MRI, Register LaneMaskRegAttrs,
                      ArrayRef<Incoming> Incomings = {}) {
    assert(LoopLevel < CommonDominators.size());

    MachineBasicBlock *Dom = CommonDominators[LoopLevel];
    for (auto &Incoming : Incomings)
      Dom = DT.findNearestCommonDominator(Dom, Incoming.Block);

    if (!inLoopLevel(*Dom, LoopLevel, Incomings)) {
      SSAUpdater.AddAvailableValue(
          Dom, insertUndefLaneMask(Dom, &MRI, LaneMaskRegAttrs));
    } else {
      // The dominator is part of the loop or the given blocks, so add the
      // undef value to unreachable predecessors instead.
      for (MachineBasicBlock *Pred : Dom->predecessors()) {
        if (!inLoopLevel(*Pred, LoopLevel, Incomings))
          SSAUpdater.AddAvailableValue(
              Pred, insertUndefLaneMask(Pred, &MRI, LaneMaskRegAttrs));
      }
    }
  }

private:
  bool inLoopLevel(MachineBasicBlock &MBB, unsigned LoopLevel,
                   ArrayRef<Incoming> Incomings) const {
    auto DomIt = Visited.find(&MBB);
    if (DomIt != Visited.end() && DomIt->second <= LoopLevel)
      return true;

    for (auto &Incoming : Incomings)
      if (Incoming.Block == &MBB)
        return true;

    return false;
  }

  void advanceLevel() {
    MachineBasicBlock *VisitedDom;

    if (!VisitedPostDom) {
      VisitedPostDom = DefBlock;
      VisitedDom = DefBlock;
      Stack.push_back(DefBlock);
    } else {
      VisitedPostDom = PDT.getNode(VisitedPostDom)->getIDom()->getBlock();
      VisitedDom = CommonDominators.back();

      for (unsigned i = 0; i < NextLevel.size();) {
        if (PDT.dominates(VisitedPostDom, NextLevel[i])) {
          Stack.push_back(NextLevel[i]);

          NextLevel[i] = NextLevel.back();
          NextLevel.pop_back();
        } else {
          i++;
        }
      }
    }

    unsigned Level = CommonDominators.size();
    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!PDT.dominates(VisitedPostDom, MBB))
        NextLevel.push_back(MBB);

      Visited[MBB] = Level;
      VisitedDom = DT.findNearestCommonDominator(VisitedDom, MBB);

      for (MachineBasicBlock *Succ : MBB->successors()) {
        if (Succ == DefBlock) {
          if (MBB == VisitedPostDom)
            FoundLoopLevel = std::min(FoundLoopLevel, Level + 1);
          else
            FoundLoopLevel = std::min(FoundLoopLevel, Level);
          continue;
        }

        if (Visited.try_emplace(Succ, ~0u).second) {
          if (MBB == VisitedPostDom)
            NextLevel.push_back(Succ);
          else
            Stack.push_back(Succ);
        }
      }
    }

    CommonDominators.push_back(VisitedDom);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                    false)

char SILowerI1Copies::ID = 0;

char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;

FunctionPass *llvm::createSILowerI1CopiesPass() {
  return new SILowerI1Copies();
}

Register llvm::createLaneMaskReg(MachineRegisterInfo *MRI,
                                 Register LaneMaskRegAttrs) {
  return MRI->cloneVirtualRegister(LaneMaskRegAttrs);
}

static Register insertUndefLaneMask(MachineBasicBlock *MBB,
                                    MachineRegisterInfo *MRI,
                                    Register LaneMaskRegAttrs) {
  MachineFunction &MF = *MBB->getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  Register UndefReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
  BuildMI(*MBB, MBB->getFirstTerminator(), {}, TII->get(AMDGPU::IMPLICIT_DEF),
          UndefReg);
  return UndefReg;
}

/// Lower all instructions that def or use vreg_1 registers.
///
/// In a first pass, we lower COPYs from vreg_1 to vector registers, as can
/// occur around inline assembly. We do this first, before vreg_1 registers
/// are changed to scalar mask registers.
///
/// Then we lower all defs of vreg_1 registers. Phi nodes are lowered before
/// all others, because phi lowering looks through copies and can therefore
/// often make copy lowering unnecessary.
bool SILowerI1Copies::runOnMachineFunction(MachineFunction &TheMF) {
  // Only need to run this in the SelectionDAG path.
  if (TheMF.getProperties().hasProperty(
          MachineFunctionProperties::Property::Selected))
    return false;

  Vreg1LoweringHelper Helper(&TheMF, &getAnalysis<MachineDominatorTree>(),
                             &getAnalysis<MachinePostDominatorTree>());

  bool Changed = false;
  Changed |= Helper.lowerCopiesFromI1();
  Changed |= Helper.lowerPhis();
  Changed |= Helper.lowerCopiesToI1();
  return Helper.cleanConstrainRegs(Changed);
}

#ifndef NDEBUG
static bool isVRegCompatibleReg(const SIRegisterInfo &TRI,
                                const MachineRegisterInfo &MRI,
                                Register Reg) {
  unsigned Size = TRI.getRegSizeInBits(Reg, MRI);
  return Size == 1 || Size == 32;
}
#endif

bool Vreg1LoweringHelper::lowerCopiesFromI1() {
  bool Changed = false;
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      Register SrcReg = MI.getOperand(1).getReg();
      if (!isVreg1(SrcReg))
        continue;

      if (isLaneMaskReg(DstReg) || isVreg1(DstReg))
        continue;

      Changed = true;

      // Copy into a 32-bit vector register.
      LLVM_DEBUG(dbgs() << "Lower copy from i1: " << MI);
      DebugLoc DL = MI.getDebugLoc();

      assert(isVRegCompatibleReg(TII->getRegisterInfo(), *MRI, DstReg));
      assert(!MI.getOperand(0).getSubReg());

      ConstrainRegs.insert(SrcReg);
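      // Per lane: DstReg = SrcReg[lane] ? -1 : 0. V_CNDMASK selects
      // src1 (-1) where the condition lane mask is set, src0 (0) elsewhere.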
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addImm(0)
          .addImm(0)
          .addImm(-1)
          .addReg(SrcReg);
      DeadCopies.push_back(&MI);
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
  return Changed;
}

PhiLoweringHelper::PhiLoweringHelper(MachineFunction *MF,
                                     MachineDominatorTree *DT,
                                     MachinePostDominatorTree *PDT)
    : MF(MF), DT(DT), PDT(PDT) {
  MRI = &MF->getRegInfo();

  ST = &MF->getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  IsWave32 = ST->isWave32();

  if (IsWave32) {
    ExecReg = AMDGPU::EXEC_LO;
    MovOp = AMDGPU::S_MOV_B32;
    AndOp = AMDGPU::S_AND_B32;
    OrOp = AMDGPU::S_OR_B32;
    XorOp = AMDGPU::S_XOR_B32;
    AndN2Op = AMDGPU::S_ANDN2_B32;
    OrN2Op = AMDGPU::S_ORN2_B32;
  } else {
    ExecReg = AMDGPU::EXEC;
    MovOp = AMDGPU::S_MOV_B64;
    AndOp = AMDGPU::S_AND_B64;
    OrOp = AMDGPU::S_OR_B64;
    XorOp = AMDGPU::S_XOR_B64;
    AndN2Op = AMDGPU::S_ANDN2_B64;
    OrN2Op = AMDGPU::S_ORN2_B64;
  }
}

bool PhiLoweringHelper::lowerPhis() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  PhiIncomingAnalysis PIA(*PDT, TII);
  SmallVector<MachineInstr *, 4> Vreg1Phis;
  SmallVector<Incoming, 4> Incomings;

  getCandidatesForLowering(Vreg1Phis);
  if (Vreg1Phis.empty())
    return false;

  DT->getBase().updateDFSNumbers();
  MachineBasicBlock *PrevMBB = nullptr;
  for (MachineInstr *MI : Vreg1Phis) {
    MachineBasicBlock &MBB = *MI->getParent();
    if (&MBB != PrevMBB) {
      LF.initialize(MBB);
      PrevMBB = &MBB;
    }

    LLVM_DEBUG(dbgs() << "Lower PHI: " << *MI);

    Register DstReg = MI->getOperand(0).getReg();
    markAsLaneMask(DstReg);
    initializeLaneMaskRegisterAttributes(DstReg);

    collectIncomingValuesFromPhi(MI, Incomings);

    // Sort the incomings such that incoming values that dominate other
    // incoming values are sorted earlier. This allows us to do some amount of
    // on-the-fly constant folding. Incomings with a smaller DFSNumIn come
    // first; DFSNumIn is 0 for the entry block.
    llvm::sort(Incomings, [this](Incoming LHS, Incoming RHS) {
      return DT->getNode(LHS.Block)->getDFSNumIn() <
             DT->getNode(RHS.Block)->getDFSNumIn();
    });

#ifndef NDEBUG
    PhiRegisters.insert(DstReg);
#endif

    // Phis in a loop that are observed outside the loop receive a simple but
    // conservatively correct treatment.
    std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
    for (MachineInstr &Use : MRI->use_instructions(DstReg))
      DomBlocks.push_back(Use.getParent());

    MachineBasicBlock *PostDomBound =
        PDT->findNearestCommonDominator(DomBlocks);

    // FIXME: This fails to find irreducible cycles. If we have a def (other
    // than a constant) in a pair of blocks that end up looping back to each
    // other, it will be mishandled. Due to structurization this shouldn't
    // occur in practice.
    unsigned FoundLoopLevel = LF.findLoop(PostDomBound);

    SSAUpdater.Initialize(DstReg);

    if (FoundLoopLevel) {
      LF.addLoopEntries(FoundLoopLevel, SSAUpdater, *MRI, LaneMaskRegAttrs,
                        Incomings);

      for (auto &Incoming : Incomings) {
        Incoming.UpdatedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
        SSAUpdater.AddAvailableValue(Incoming.Block, Incoming.UpdatedReg);
      }

      for (auto &Incoming : Incomings) {
        MachineBasicBlock &IMBB = *Incoming.Block;
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, Incoming.UpdatedReg,
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), Incoming.Reg);
      }
    } else {
      // The phi is not observed from outside a loop. Use a more accurate
      // lowering.
      PIA.analyze(MBB, Incomings);

      for (MachineBasicBlock *MBB : PIA.predecessors())
        SSAUpdater.AddAvailableValue(
            MBB, insertUndefLaneMask(MBB, MRI, LaneMaskRegAttrs));

      for (auto &Incoming : Incomings) {
        MachineBasicBlock &IMBB = *Incoming.Block;
        if (PIA.isSource(IMBB)) {
          constrainIncomingRegisterTakenAsIs(Incoming);
          SSAUpdater.AddAvailableValue(&IMBB, Incoming.Reg);
        } else {
          Incoming.UpdatedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
          SSAUpdater.AddAvailableValue(&IMBB, Incoming.UpdatedReg);
        }
      }

      for (auto &Incoming : Incomings) {
        if (!Incoming.UpdatedReg.isValid())
          continue;

        MachineBasicBlock &IMBB = *Incoming.Block;
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, Incoming.UpdatedReg,
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), Incoming.Reg);
      }
    }

    Register NewReg = SSAUpdater.GetValueInMiddleOfBlock(&MBB);
    if (NewReg != DstReg) {
      replaceDstReg(NewReg, DstReg, &MBB);
      MI->eraseFromParent();
    }

    Incomings.clear();
  }
  return true;
}

bool Vreg1LoweringHelper::lowerCopiesToI1() {
  bool Changed = false;
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    LF.initialize(MBB);

    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF &&
          MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      if (!isVreg1(DstReg))
        continue;

      Changed = true;

      if (MRI->use_empty(DstReg)) {
        DeadCopies.push_back(&MI);
        continue;
      }

      LLVM_DEBUG(dbgs() << "Lower Other: " << MI);

      markAsLaneMask(DstReg);
      initializeLaneMaskRegisterAttributes(DstReg);

      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
        continue;

      DebugLoc DL = MI.getDebugLoc();
      Register SrcReg = MI.getOperand(1).getReg();
      assert(!MI.getOperand(1).getSubReg());

      if (!SrcReg.isVirtual() ||
          (!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
        assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
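        // Per lane: TmpReg[lane] = (SrcReg != 0), converting the 32-bit value
        // into a lane mask.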
        Register TmpReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
        BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64), TmpReg)
            .addReg(SrcReg)
            .addImm(0);
        MI.getOperand(1).setReg(TmpReg);
        SrcReg = TmpReg;
      } else {
        // SrcReg needs to be live beyond copy.
        MI.getOperand(1).setIsKill(false);
      }

      // Defs in a loop that are observed outside the loop must be transformed
      // into appropriate bit manipulation.
      std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
      for (MachineInstr &Use : MRI->use_instructions(DstReg))
        DomBlocks.push_back(Use.getParent());

      MachineBasicBlock *PostDomBound =
          PDT->findNearestCommonDominator(DomBlocks);
      unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
      if (FoundLoopLevel) {
        SSAUpdater.Initialize(DstReg);
        SSAUpdater.AddAvailableValue(&MBB, DstReg);
        LF.addLoopEntries(FoundLoopLevel, SSAUpdater, *MRI, LaneMaskRegAttrs);

        buildMergeLaneMasks(MBB, MI, DL, DstReg,
                            SSAUpdater.GetValueInMiddleOfBlock(&MBB), SrcReg);
        DeadCopies.push_back(&MI);
      }
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
  return Changed;
}

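/// Follow the chain of COPYs defining \p Reg to determine whether it is a
/// constant lane mask. Returns true and sets \p Val for an S_MOV of 0 or -1;
/// an IMPLICIT_DEF also counts as constant (with an arbitrary value).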
bool PhiLoweringHelper::isConstantLaneMask(Register Reg, bool &Val) const {
  const MachineInstr *MI;
  for (;;) {
    MI = MRI->getUniqueVRegDef(Reg);
    if (MI->getOpcode() == AMDGPU::IMPLICIT_DEF)
      return true;

    if (MI->getOpcode() != AMDGPU::COPY)
      break;

    Reg = MI->getOperand(1).getReg();
    if (!Reg.isVirtual())
      return false;
    if (!isLaneMaskReg(Reg))
      return false;
  }

  if (MI->getOpcode() != MovOp)
    return false;

  if (!MI->getOperand(1).isImm())
    return false;

  int64_t Imm = MI->getOperand(1).getImm();
  if (Imm == 0) {
    Val = false;
    return true;
  }
  if (Imm == -1) {
    Val = true;
    return true;
  }

  return false;
}

static void instrDefsUsesSCC(const MachineInstr &MI, bool &Def, bool &Use) {
  Def = false;
  Use = false;

  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.getReg() == AMDGPU::SCC) {
      if (MO.isUse())
        Use = true;
      else
        Def = true;
    }
  }
}

/// Return a point at the end of the given \p MBB to insert SALU instructions
/// for lane mask calculation. Take terminators and SCC into account.
MachineBasicBlock::iterator
PhiLoweringHelper::getSaluInsertionAtEnd(MachineBasicBlock &MBB) const {
  auto InsertionPt = MBB.getFirstTerminator();
  bool TerminatorsUseSCC = false;
  for (auto I = InsertionPt, E = MBB.end(); I != E; ++I) {
    bool DefsSCC;
    instrDefsUsesSCC(*I, DefsSCC, TerminatorsUseSCC);
    if (TerminatorsUseSCC || DefsSCC)
      break;
  }

  if (!TerminatorsUseSCC)
    return InsertionPt;

  while (InsertionPt != MBB.begin()) {
    InsertionPt--;

    bool DefSCC, UseSCC;
    instrDefsUsesSCC(*InsertionPt, DefSCC, UseSCC);
    if (DefSCC)
      return InsertionPt;
  }

  // We should have at least seen an IMPLICIT_DEF or COPY.
  llvm_unreachable("SCC used by terminator but no def in block");
}

// VReg_1 -> SReg_32 or SReg_64
void Vreg1LoweringHelper::markAsLaneMask(Register DstReg) const {
  MRI->setRegClass(DstReg, ST->getBoolRC());
}

void Vreg1LoweringHelper::getCandidatesForLowering(
    SmallVectorImpl<MachineInstr *> &Vreg1Phis) const {
  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB.phis()) {
      if (isVreg1(MI.getOperand(0).getReg()))
        Vreg1Phis.push_back(&MI);
    }
  }
}

void Vreg1LoweringHelper::collectIncomingValuesFromPhi(
    const MachineInstr *MI, SmallVectorImpl<Incoming> &Incomings) const {
  for (unsigned i = 1; i < MI->getNumOperands(); i += 2) {
    assert(i + 1 < MI->getNumOperands());
    Register IncomingReg = MI->getOperand(i).getReg();
    MachineBasicBlock *IncomingMBB = MI->getOperand(i + 1).getMBB();
    MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);

    if (IncomingDef->getOpcode() == AMDGPU::COPY) {
      IncomingReg = IncomingDef->getOperand(1).getReg();
      assert(isLaneMaskReg(IncomingReg) || isVreg1(IncomingReg));
      assert(!IncomingDef->getOperand(1).getSubReg());
    } else if (IncomingDef->getOpcode() == AMDGPU::IMPLICIT_DEF) {
      continue;
    } else {
      assert(IncomingDef->isPHI() || PhiRegisters.count(IncomingReg));
    }

    Incomings.emplace_back(IncomingReg, IncomingMBB, Register());
  }
}

void Vreg1LoweringHelper::replaceDstReg(Register NewReg, Register OldReg,
                                        MachineBasicBlock *MBB) {
  MRI->replaceRegWith(NewReg, OldReg);
}

void Vreg1LoweringHelper::buildMergeLaneMasks(MachineBasicBlock &MBB,
                                              MachineBasicBlock::iterator I,
                                              const DebugLoc &DL,
                                              Register DstReg, Register PrevReg,
                                              Register CurReg) {
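  // In the general case (a sketch; constant operands are folded below):
  //   DstReg = (PrevReg & ~EXEC) | (CurReg & EXEC)
  // i.e. active lanes take the current value, inactive lanes keep the
  // previously accumulated value.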
  bool PrevVal = false;
  bool PrevConstant = isConstantLaneMask(PrevReg, PrevVal);
  bool CurVal = false;
  bool CurConstant = isConstantLaneMask(CurReg, CurVal);

  if (PrevConstant && CurConstant) {
    if (PrevVal == CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
    } else if (CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(ExecReg);
    } else {
      BuildMI(MBB, I, DL, TII->get(XorOp), DstReg)
          .addReg(ExecReg)
          .addImm(-1);
    }
    return;
  }

  Register PrevMaskedReg;
  Register CurMaskedReg;
  if (!PrevConstant) {
    if (CurConstant && CurVal) {
      PrevMaskedReg = PrevReg;
    } else {
      PrevMaskedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
      BuildMI(MBB, I, DL, TII->get(AndN2Op), PrevMaskedReg)
          .addReg(PrevReg)
          .addReg(ExecReg);
    }
  }
  if (!CurConstant) {
    // TODO: check whether CurReg is already masked by EXEC
    if (PrevConstant && PrevVal) {
      CurMaskedReg = CurReg;
    } else {
      CurMaskedReg = createLaneMaskReg(MRI, LaneMaskRegAttrs);
      BuildMI(MBB, I, DL, TII->get(AndOp), CurMaskedReg)
          .addReg(CurReg)
          .addReg(ExecReg);
    }
  }

  if (PrevConstant && !PrevVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(CurMaskedReg);
  } else if (CurConstant && !CurVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(PrevMaskedReg);
  } else if (PrevConstant && PrevVal) {
    BuildMI(MBB, I, DL, TII->get(OrN2Op), DstReg)
        .addReg(CurMaskedReg)
        .addReg(ExecReg);
  } else {
    BuildMI(MBB, I, DL, TII->get(OrOp), DstReg)
        .addReg(PrevMaskedReg)
        .addReg(CurMaskedReg ? CurMaskedReg : ExecReg);
  }
}

void Vreg1LoweringHelper::constrainIncomingRegisterTakenAsIs(Incoming &In) {
  return;
}