//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  void shrinkMIMG(MachineInstr &MI);

public:
  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

/// This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction.
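///
/// A rough sketch of the intended fold (the virtual registers and the literal
/// are illustrative, not taken from a real test):
///   %1:vgpr_32 = V_MOV_B32_e32 0x12345678
///   %2:vgpr_32 = V_OR_B32_e32 %1, %0
/// -->
///   %2:vgpr_32 = V_OR_B32_e32 0x12345678, %0   ; the V_MOV_B32 is erased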
static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    Register Reg = Src0.getReg();
    if (Register::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
      MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
                               isUInt<32>(MovSrc.getImm()))) {
          // It's possible to have only one component of a super-reg defined by
          // a single mov, so we need to clear any subregister flag.
          Src0.setSubReg(0);
          Src0.ChangeToImmediate(MovSrc.getImm());
          ConstantFolded = true;
        } else if (MovSrc.isFI()) {
          Src0.setSubReg(0);
          Src0.ChangeToFrameIndex(MovSrc.getIndex());
          ConstantFolded = true;
        } else if (MovSrc.isGlobal()) {
          Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
                          MovSrc.getTargetFlags());
          ConstantFolded = true;
        }

        if (ConstantFolded) {
          assert(MRI.use_empty(Reg));
          Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, TII, MRI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}

static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isInt<16>(Src.getImm()) &&
    !TII->isInlineConstant(*Src.getParent(),
                           Src.getParent()->getOperandNo(&Src));
}

static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isUInt<16>(Src.getImm()) &&
    !TII->isInlineConstant(*Src.getParent(),
                           Src.getParent()->getOperandNo(&Src));
}

static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
                                 const MachineOperand &Src,
                                 bool &IsUnsigned) {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
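///
/// Worked example (values illustrative): Src = 0x80000000 is not an inline
/// immediate, but reverseBits(0x80000000) == 0x00000001, and 1 is in the
/// inline range [-16, 64], so the caller can materialize the constant with a
/// bit-reverse (V_BFREV_B32 / S_BREV_B32) of the inline value instead of a
/// 32-bit literal.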
static bool isReverseInlineImm(const SIInstrInfo *TII,
                               const MachineOperand &Src,
                               int32_t &ReverseImm) {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}

/// Copy the implicit register operands that are not part of the instruction
/// definition from \p MI to \p NewMI.
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF,
                                 const MachineInstr &MI) {
  for (unsigned i = MI.getDesc().getNumOperands() +
         MI.getDesc().getNumImplicitUses() +
         MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

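/// Attempt to rewrite an SOPC compare against a 16-bit literal into the
/// corresponding SOPK form. A rough sketch of the intent (opcode and operands
/// are illustrative):
///   S_CMP_GT_U32 %0, 0x1234     ; the literal needs an extra 32-bit dword
/// -->
///   S_CMPK_GT_U32 %0, 0x1234    ; imm16 is carried in the instruction word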
static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
  // cmpk instructions compute scc = src0 <cc op> imm16, so commute the
  // instruction to get the constant on the RHS.
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  // cmpk requires src0 to be a register
  const MachineOperand &Src0 = MI.getOperand(0);
  if (!Src0.isReg())
    return;

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and is initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
    }

    return;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
    MI.setDesc(NewDesc);
  }
}

// Shrink NSA encoded instructions with contiguous VGPRs to non-NSA encoding.
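//
// A rough sketch of the rewrite (GFX10; registers and modifiers illustrative):
//   image_sample v[4:7], [v0, v1, v2], s[0:7], s[8:11] ...   ; NSA encoding
// -->
//   image_sample v[4:7], v[0:2], s[0:7], s[8:11] ...         ; non-NSA
// This is only possible because the address registers v0, v1, v2 happen to be
// contiguous, so they can be described by a single vaddr tuple.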
void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) {
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  if (!Info || Info->MIMGEncoding != AMDGPU::MIMGEncGfx10NSA)
    return;

  MachineFunction *MF = MI.getParent()->getParent();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  unsigned NewAddrDwords = Info->VAddrDwords;
  const TargetRegisterClass *RC;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords <= 8) {
    RC = &AMDGPU::VReg_256RegClass;
    NewAddrDwords = 8;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
    NewAddrDwords = 16;
  }

  unsigned VgprBase = 0;
  bool IsUndef = true;
  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  for (unsigned i = 0; i < Info->VAddrDwords; ++i) {
    const MachineOperand &Op = MI.getOperand(VAddr0Idx + i);
    unsigned Vgpr = TRI.getHWRegIndex(Op.getReg());

    if (i == 0) {
      VgprBase = Vgpr;
    } else if (VgprBase + i != Vgpr)
      return;

    if (!Op.isUndef())
      IsUndef = false;
    if (!Op.isKill())
      IsKill = false;
  }

  if (VgprBase + NewAddrDwords > 256)
    return;

  // Further check for an implicit tied operand - one may be present if TFE or
  // LWE is enabled
  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
  int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
  unsigned TFEVal = MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = MI.getOperand(LWEIdx).getImm();
  int ToUntie = -1;
  if (TFEVal || LWEVal) {
    // TFE/LWE is enabled so we need to deal with an implicit tied operand
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
        // This is the tied operand
        assert(
            ToUntie == -1 &&
            "found more than one tied implicit operand when expecting only 1");
        ToUntie = i;
        MI.untieRegOperand(ToUntie);
      }
    }
  }

  unsigned NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, AMDGPU::MIMGEncGfx10Default,
                            Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));
  MI.getOperand(VAddr0Idx).setReg(RC->getRegister(VgprBase));
  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  for (unsigned i = 1; i < Info->VAddrDwords; ++i)
    MI.RemoveOperand(VAddr0Idx + 1);

  if (ToUntie >= 0) {
    MI.tieOperands(
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
        ToUntie - (Info->VAddrDwords - 1));
  }
}

/// Attempt to shrink AND/OR/XOR operations requiring non-inlinable literals.
/// For AND or OR, try using S_BITSET{0,1} to clear or set bits.
/// If the inverse of the immediate is legal, use ANDN2, ORN2 or
/// XNOR (as a ^ b == ~(a ^ ~b)).
/// \returns true if the caller should continue the machine function iterator
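///
/// Worked examples (immediates illustrative; the in-place rewrite applies when
/// the destination and source end up in the same register):
///   s_and_b32 s0, s0, 0xffffdfff  ->  s_bitset0_b32 s0, 13
///     (~0xffffdfff == 0x2000 is a power of two, so clear bit 13)
///   s_and_b32 s0, s0, 0xffffffc0  ->  s_andn2_b32 s0, s0, 63
///     (0xffffffc0 is not inlinable, but its inverse 63 is)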
static bool shrinkScalarLogicOp(const GCNSubtarget &ST,
                                MachineRegisterInfo &MRI,
                                const SIInstrInfo *TII,
                                MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  const MachineOperand *Dest = &MI.getOperand(0);
  MachineOperand *Src0 = &MI.getOperand(1);
  MachineOperand *Src1 = &MI.getOperand(2);
  MachineOperand *SrcReg = Src0;
  MachineOperand *SrcImm = Src1;

  if (!SrcImm->isImm() ||
      AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST.hasInv2PiInlineImm()))
    return false;

  uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
  uint32_t NewImm = 0;

  if (Opc == AMDGPU::S_AND_B32) {
    if (isPowerOf2_32(~Imm)) {
      NewImm = countTrailingOnes(Imm);
      Opc = AMDGPU::S_BITSET0_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ANDN2_B32;
    }
  } else if (Opc == AMDGPU::S_OR_B32) {
    if (isPowerOf2_32(Imm)) {
      NewImm = countTrailingZeros(Imm);
      Opc = AMDGPU::S_BITSET1_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ORN2_B32;
    }
  } else if (Opc == AMDGPU::S_XOR_B32) {
    if (AMDGPU::isInlinableLiteral32(~Imm, ST.hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_XNOR_B32;
    }
  } else {
    llvm_unreachable("unexpected opcode");
  }

  if ((Opc == AMDGPU::S_ANDN2_B32 || Opc == AMDGPU::S_ORN2_B32) &&
      SrcImm == Src0) {
    if (!TII->commuteInstruction(MI, false, 1, 2))
      NewImm = 0;
  }

  if (NewImm != 0) {
    if (Register::isVirtualRegister(Dest->getReg()) && SrcReg->isReg()) {
      MRI.setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
      MRI.setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
      return true;
    }

    if (SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
      MI.setDesc(TII->get(Opc));
      if (Opc == AMDGPU::S_BITSET0_B32 ||
          Opc == AMDGPU::S_BITSET1_B32) {
        Src0->ChangeToImmediate(NewImm);
        // Remove the immediate and add the tied input.
        MI.getOperand(2).ChangeToRegister(Dest->getReg(), false);
        MI.tieOperands(0, 2);
      } else {
        SrcImm->setImm(NewImm);
      }
    }
  }

  return false;
}

// This is the same as MachineInstr::readsRegister/modifiesRegister except
// it takes subregs into account.
static bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
                          unsigned Reg, unsigned SubReg,
                          const SIRegisterInfo &TRI) {
  for (const MachineOperand &MO : R) {
    if (!MO.isReg())
      continue;

    if (Register::isPhysicalRegister(Reg) &&
        Register::isPhysicalRegister(MO.getReg())) {
      if (TRI.regsOverlap(Reg, MO.getReg()))
        return true;
    } else if (MO.getReg() == Reg && Register::isVirtualRegister(Reg)) {
      LaneBitmask Overlap = TRI.getSubRegIndexLaneMask(SubReg) &
                            TRI.getSubRegIndexLaneMask(MO.getSubReg());
      if (Overlap.any())
        return true;
    }
  }
  return false;
}

static bool instReadsReg(const MachineInstr *MI,
                         unsigned Reg, unsigned SubReg,
                         const SIRegisterInfo &TRI) {
  return instAccessReg(MI->uses(), Reg, SubReg, TRI);
}

static bool instModifiesReg(const MachineInstr *MI,
                            unsigned Reg, unsigned SubReg,
                            const SIRegisterInfo &TRI) {
  return instAccessReg(MI->defs(), Reg, SubReg, TRI);
}

static TargetInstrInfo::RegSubRegPair
getSubRegForIndex(unsigned Reg, unsigned Sub, unsigned I,
                  const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) {
  if (TRI.getRegSizeInBits(Reg, MRI) != 32) {
    if (Register::isPhysicalRegister(Reg)) {
      Reg = TRI.getSubReg(Reg, TRI.getSubRegFromChannel(I));
    } else {
      Sub = TRI.getSubRegFromChannel(I + TRI.getChannelFromSubReg(Sub));
    }
  }
  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}

// Match:
// mov t, x
// mov x, y
// mov y, t
//
// =>
//
// mov t, x (t is potentially dead and move eliminated)
// v_swap_b32 x, y
//
// Returns a pointer to the next valid instruction if it was able to create
// v_swap_b32.
//
// This must not be done too early, so as not to prevent folding that could
// remove the matched moves; it should preferably run before RA to release
// saved registers, and possibly again after RA, which can insert copies too.
//
// This is really just a generic peephole rather than a canonical shrinking,
// although the requirements match the pass placement and it reduces code size
// too.
static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI,
                               const SIInstrInfo *TII) {
  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
         MovT.getOpcode() == AMDGPU::COPY);

  Register T = MovT.getOperand(0).getReg();
  unsigned Tsub = MovT.getOperand(0).getSubReg();
  MachineOperand &Xop = MovT.getOperand(1);

  if (!Xop.isReg())
    return nullptr;
  Register X = Xop.getReg();
  unsigned Xsub = Xop.getSubReg();

  unsigned Size = TII->getOpSize(MovT, 0) / 4;

  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  if (!TRI.isVGPR(MRI, X))
    return nullptr;

  const unsigned SearchLimit = 16;
  unsigned Count = 0;
  for (auto Iter = std::next(MovT.getIterator()),
            E = MovT.getParent()->instr_end();
       Iter != E && Count < SearchLimit; ++Iter, ++Count) {

    MachineInstr *MovY = &*Iter;
    if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
         MovY->getOpcode() != AMDGPU::COPY) ||
        !MovY->getOperand(1).isReg()        ||
        MovY->getOperand(1).getReg() != T   ||
        MovY->getOperand(1).getSubReg() != Tsub)
      continue;

    Register Y = MovY->getOperand(0).getReg();
    unsigned Ysub = MovY->getOperand(0).getSubReg();

    if (!TRI.isVGPR(MRI, Y))
      continue;

    MachineInstr *MovX = nullptr;
    for (auto IY = MovY->getIterator(), I = std::next(MovT.getIterator());
         I != IY; ++I) {
      if (instReadsReg(&*I, X, Xsub, TRI)    ||
          instModifiesReg(&*I, Y, Ysub, TRI) ||
          instModifiesReg(&*I, T, Tsub, TRI) ||
          (MovX && instModifiesReg(&*I, X, Xsub, TRI))) {
        MovX = nullptr;
        break;
      }
      if (!instReadsReg(&*I, Y, Ysub, TRI)) {
        if (!MovX && instModifiesReg(&*I, X, Xsub, TRI)) {
          MovX = nullptr;
          break;
        }
        continue;
      }
      if (MovX ||
          (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
           I->getOpcode() != AMDGPU::COPY) ||
          I->getOperand(0).getReg() != X ||
          I->getOperand(0).getSubReg() != Xsub) {
        MovX = nullptr;
        break;
      }
      MovX = &*I;
    }

    if (!MovX)
      continue;

    LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << MovY);

    for (unsigned I = 0; I < Size; ++I) {
      TargetInstrInfo::RegSubRegPair X1, Y1;
      X1 = getSubRegForIndex(X, Xsub, I, TRI, MRI);
      Y1 = getSubRegForIndex(Y, Ysub, I, TRI, MRI);
      BuildMI(*MovT.getParent(), MovX->getIterator(), MovT.getDebugLoc(),
                TII->get(AMDGPU::V_SWAP_B32))
        .addDef(X1.Reg, 0, X1.SubReg)
        .addDef(Y1.Reg, 0, Y1.SubReg)
        .addReg(Y1.Reg, 0, Y1.SubReg)
        .addReg(X1.Reg, 0, X1.SubReg).getInstr();
    }
    MovX->eraseFromParent();
    MovY->eraseFromParent();
    MachineInstr *Next = &*std::next(MovT.getIterator());
    if (MRI.use_nodbg_empty(T))
      MovT.eraseFromParent();
    else
      Xop.setIsKill(false);

    return Next;
  }

  return nullptr;
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned VCCReg = ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
                                                  BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of materializing
        // sign bits.

        // Test if we are after regalloc. We only want to do this after other
        // optimizations have run, because this transform will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() &&
            Register::isPhysicalRegister(MI.getOperand(0).getReg())) {
          int32_t ReverseImm;
          if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      if (ST.hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                           MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI, MRI, TII)) {
          Next = NextMI->getIterator();
          continue;
        }
      }

      // Combine adjacent s_nops, using the immediate operand to encode how
      // long to wait.
      //
      // s_nop N
      // s_nop M
      //  =>
      // s_nop (N + M)
      if (MI.getOpcode() == AMDGPU::S_NOP &&
          MI.getNumOperands() == 1 && // Don't merge with implicit operands
          Next != MBB.end() &&
          (*Next).getOpcode() == AMDGPU::S_NOP &&
          (*Next).getNumOperands() == 1) {

        MachineInstr &NextMI = *Next;
        // The instruction encodes the amount to wait with an offset of 1,
        // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
        // after adding.
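        // For instance (values illustrative): "s_nop 2" followed by "s_nop 3"
        // waits 3 + 4 = 7 cycles, which re-encodes as a single "s_nop 6".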
        uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
        uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;

        // Make sure we don't overflow the bounds.
        if (Nop0 + Nop1 <= 8) {
          NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
          MI.eraseFromParent();
        }

        continue;
      }

      // FIXME: We also need to consider movs of constant operands since
      // immediate operands are not folded if they have more than one use, and
      // the operand folding pass is unaware if the immediate will be free since
      // it won't know if the src == dest constraint will end up being
      // satisfied.
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (Register::isVirtualRegister(Dest->getReg()) && Src0->isReg()) {
          MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(TII, MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
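      // For example (immediate illustrative):
      //   s_mov_b32 s0, 0x1234      ; opcode dword + 32-bit literal = 8 bytes
      // -->
      //   s_movk_i32 s0, 0x1234     ; imm16 lives in the instruction = 4 bytes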
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() && Register::isPhysicalRegister(Dst.getReg())) {
          int32_t ReverseImm;
          if (isKImmOperand(TII, Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      // Shrink scalar logic operations.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        if (shrinkScalarLogicOp(ST, MRI, TII, MI))
          continue;
      }

      if (TII->isMIMG(MI.getOpcode()) &&
          ST.getGeneration() >= AMDGPUSubtarget::GFX10 &&
          MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs)) {
        shrinkMIMG(MI);
        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!TII->canShrink(MI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !TII->canShrink(MI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        Register DstReg = MI.getOperand(0).getReg();
        if (Register::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because this is only one register and
          // cannot deal with sequences which would require multiple copies of
          // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
          //
          // So, instead of forcing the instruction to write to VCC, we provide
          // a hint to the register allocator to use VCC and then we will run
          // this pass again after RA and shrink it if it outputs to VCC.
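          //
          // Illustrative sketch (wave64; virtual register made up): a def like
          //   %5:sreg_64 = V_CMP_LT_F32_e64 %1, %2
          // can only become V_CMP_LT_F32_e32 (which implicitly defines VCC) if
          // %5 is eventually assigned to VCC, so hint it and retry post-RA.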
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, VCCReg);
          continue;
        }
        if (DstReg != VCCReg)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        Register SReg = Src2->getReg();
        if (Register::isVirtualRegister(SReg)) {
          MRI.setRegAllocationHint(SReg, 0, VCCReg);
          continue;
        }
        if (SReg != VCCReg)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      // Check the carry-in operand for v_addc_u32_e64.
      const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::src2);

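      // Illustrative sketch (wave64; registers made up): something like
      //   %d:vgpr_32, %c:sreg_64 = V_ADDC_U32_e64 %a, %b, %cin, 0
      // only shrinks to V_ADDC_U32_e32, which uses VCC implicitly for both the
      // carry-out and carry-in, so hint those operands to VCC and retry after
      // RA.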
      if (SDst) {
        bool Next = false;

        if (SDst->getReg() != VCCReg) {
          if (Register::isVirtualRegister(SDst->getReg()))
            MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
          Next = true;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        if (Src2 && Src2->getReg() != VCCReg) {
          if (Register::isVirtualRegister(Src2->getReg()))
            MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
          Next = true;
        }

        if (Next)
          continue;
      }

      // We can shrink this instruction
      LLVM_DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MF, MI);

      MI.eraseFromParent();
      foldImmediates(*Inst32, TII, MRI);

      LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}