1 //===-- SIFoldOperands.cpp - Fold operands --- ----------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 /// \file
8 //===----------------------------------------------------------------------===//
9 //
10 
11 #include "AMDGPU.h"
12 #include "GCNSubtarget.h"
13 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
14 #include "SIMachineFunctionInfo.h"
15 #include "llvm/ADT/DepthFirstIterator.h"
16 #include "llvm/CodeGen/MachineFunctionPass.h"
17 
18 #define DEBUG_TYPE "si-fold-operands"
19 using namespace llvm;
20 
21 namespace {
22 
23 struct FoldCandidate {
24   MachineInstr *UseMI;
25   union {
26     MachineOperand *OpToFold;
27     uint64_t ImmToFold;
28     int FrameIndexToFold;
29   };
30   int ShrinkOpcode;
31   unsigned UseOpNo;
32   MachineOperand::MachineOperandType Kind;
33   bool Commuted;
34 
35   FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
36                 bool Commuted_ = false,
37                 int ShrinkOp = -1) :
38     UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
39     Kind(FoldOp->getType()),
40     Commuted(Commuted_) {
41     if (FoldOp->isImm()) {
42       ImmToFold = FoldOp->getImm();
43     } else if (FoldOp->isFI()) {
44       FrameIndexToFold = FoldOp->getIndex();
45     } else {
46       assert(FoldOp->isReg() || FoldOp->isGlobal());
47       OpToFold = FoldOp;
48     }
49   }
50 
51   bool isFI() const {
52     return Kind == MachineOperand::MO_FrameIndex;
53   }
54 
55   bool isImm() const {
56     return Kind == MachineOperand::MO_Immediate;
57   }
58 
59   bool isReg() const {
60     return Kind == MachineOperand::MO_Register;
61   }
62 
63   bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }
64 
65   bool isCommuted() const {
66     return Commuted;
67   }
68 
69   bool needsShrink() const {
70     return ShrinkOpcode != -1;
71   }
72 
73   int getShrinkOpcode() const {
74     return ShrinkOpcode;
75   }
76 };
77 
78 class SIFoldOperands : public MachineFunctionPass {
79 public:
80   static char ID;
81   MachineRegisterInfo *MRI;
82   const SIInstrInfo *TII;
83   const SIRegisterInfo *TRI;
84   const GCNSubtarget *ST;
85   const SIMachineFunctionInfo *MFI;
86 
87   void foldOperand(MachineOperand &OpToFold,
88                    MachineInstr *UseMI,
89                    int UseOpIdx,
90                    SmallVectorImpl<FoldCandidate> &FoldList,
91                    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;
92 
93   bool tryFoldCndMask(MachineInstr &MI) const;
94   bool tryFoldZeroHighBits(MachineInstr &MI) const;
95   bool foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
96 
97   const MachineOperand *isClamp(const MachineInstr &MI) const;
98   bool tryFoldClamp(MachineInstr &MI);
99 
100   std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
101   bool tryFoldOMod(MachineInstr &MI);
102   bool tryFoldRegSequence(MachineInstr &MI);
103   bool tryFoldLCSSAPhi(MachineInstr &MI);
104   bool tryFoldLoad(MachineInstr &MI);
105 
106 public:
107   SIFoldOperands() : MachineFunctionPass(ID) {
108     initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
109   }
110 
111   bool runOnMachineFunction(MachineFunction &MF) override;
112 
113   StringRef getPassName() const override { return "SI Fold Operands"; }
114 
115   void getAnalysisUsage(AnalysisUsage &AU) const override {
116     AU.setPreservesCFG();
117     MachineFunctionPass::getAnalysisUsage(AU);
118   }
119 };
120 
121 } // End anonymous namespace.
122 
123 INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
124                 "SI Fold Operands", false, false)
125 
126 char SIFoldOperands::ID = 0;
127 
128 char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
129 
130 // Map multiply-accumulate opcode to corresponding multiply-add opcode if any.
131 static unsigned macToMad(unsigned Opc) {
132   switch (Opc) {
133   case AMDGPU::V_MAC_F32_e64:
134     return AMDGPU::V_MAD_F32_e64;
135   case AMDGPU::V_MAC_F16_e64:
136     return AMDGPU::V_MAD_F16_e64;
137   case AMDGPU::V_FMAC_F32_e64:
138     return AMDGPU::V_FMA_F32_e64;
139   case AMDGPU::V_FMAC_F16_e64:
140     return AMDGPU::V_FMA_F16_gfx9_e64;
141   case AMDGPU::V_FMAC_LEGACY_F32_e64:
142     return AMDGPU::V_FMA_LEGACY_F32_e64;
143   case AMDGPU::V_FMAC_F64_e64:
144     return AMDGPU::V_FMA_F64_e64;
145   }
146   return AMDGPU::INSTRUCTION_LIST_END;
147 }
148 
149 // TODO: Add heuristic that the frame index might not fit in the addressing mode
150 // immediate offset to avoid materializing in loops.
151 static bool frameIndexMayFold(const SIInstrInfo *TII,
152                               const MachineInstr &UseMI,
153                               int OpNo,
154                               const MachineOperand &OpToFold) {
155   if (!OpToFold.isFI())
156     return false;
157 
158   if (TII->isMUBUF(UseMI))
159     return OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
160                                               AMDGPU::OpName::vaddr);
161   if (!TII->isFLATScratch(UseMI))
162     return false;
163 
164   int SIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
165                                         AMDGPU::OpName::saddr);
166   if (OpNo == SIdx)
167     return true;
168 
169   int VIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
170                                         AMDGPU::OpName::vaddr);
171   return OpNo == VIdx && SIdx == -1;
172 }
173 
174 FunctionPass *llvm::createSIFoldOperandsPass() {
175   return new SIFoldOperands();
176 }
177 
178 static bool updateOperand(FoldCandidate &Fold,
179                           const SIInstrInfo &TII,
180                           const TargetRegisterInfo &TRI,
181                           const GCNSubtarget &ST) {
182   MachineInstr *MI = Fold.UseMI;
183   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
184   assert(Old.isReg());
185 
186   if (Fold.isImm()) {
187     if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
188         !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
189         (!ST.hasDOTOpSelHazard() ||
190          !(MI->getDesc().TSFlags & SIInstrFlags::IsDOT)) &&
191         AMDGPU::isFoldableLiteralV216(Fold.ImmToFold,
192                                       ST.hasInv2PiInlineImm())) {
193       // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
194       // already set.
195       unsigned Opcode = MI->getOpcode();
196       int OpNo = MI->getOperandNo(&Old);
197       int ModIdx = -1;
198       if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
199         ModIdx = AMDGPU::OpName::src0_modifiers;
200       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
201         ModIdx = AMDGPU::OpName::src1_modifiers;
202       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
203         ModIdx = AMDGPU::OpName::src2_modifiers;
204       assert(ModIdx != -1);
205       ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
206       MachineOperand &Mod = MI->getOperand(ModIdx);
207       unsigned Val = Mod.getImm();
208       if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
209         // Only apply the following transformation if that operand requires
210         // a packed immediate.
211         switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
212         case AMDGPU::OPERAND_REG_IMM_V2FP16:
213         case AMDGPU::OPERAND_REG_IMM_V2INT16:
214         case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
215         case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
216           // If upper part is all zero we do not need op_sel_hi.
217           if (!isUInt<16>(Fold.ImmToFold)) {
218             if (!(Fold.ImmToFold & 0xffff)) {
219               Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
220               Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
221               Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
222               return true;
223             }
224             Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
225             Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
226             return true;
227           }
228           break;
229         default:
230           break;
231         }
232       }
233     }
234   }
235 
236   if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
237     MachineBasicBlock *MBB = MI->getParent();
238     auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
239     if (Liveness != MachineBasicBlock::LQR_Dead) {
240       LLVM_DEBUG(dbgs() << "Not shrinking " << MI << " due to vcc liveness\n");
241       return false;
242     }
243 
244     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
245     int Op32 = Fold.getShrinkOpcode();
246     MachineOperand &Dst0 = MI->getOperand(0);
247     MachineOperand &Dst1 = MI->getOperand(1);
248     assert(Dst0.isDef() && Dst1.isDef());
249 
250     bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
251 
252     const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
253     Register NewReg0 = MRI.createVirtualRegister(Dst0RC);
254 
255     MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
256 
257     if (HaveNonDbgCarryUse) {
258       BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
259         .addReg(AMDGPU::VCC, RegState::Kill);
260     }
261 
262     // Keep the old instruction around to avoid breaking iterators, but
263     // replace it with a dummy instruction to remove uses.
264     //
265     // FIXME: We should not invert how this pass looks at operands to avoid
266     // this. Should track set of foldable movs instead of looking for uses
267     // when looking at a use.
268     Dst0.setReg(NewReg0);
269     for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
270       MI->removeOperand(I);
271     MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
272 
273     if (Fold.isCommuted())
274       TII.commuteInstruction(*Inst32, false);
275     return true;
276   }
277 
278   assert(!Fold.needsShrink() && "not handled");
279 
280   if (Fold.isImm()) {
281     if (Old.isTied()) {
282       int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(MI->getOpcode());
283       if (NewMFMAOpc == -1)
284         return false;
285       MI->setDesc(TII.get(NewMFMAOpc));
286       MI->untieRegOperand(0);
287     }
288     Old.ChangeToImmediate(Fold.ImmToFold);
289     return true;
290   }
291 
292   if (Fold.isGlobal()) {
293     Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
294                    Fold.OpToFold->getTargetFlags());
295     return true;
296   }
297 
298   if (Fold.isFI()) {
299     Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
300     return true;
301   }
302 
303   MachineOperand *New = Fold.OpToFold;
304   Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
305   Old.setIsUndef(New->isUndef());
306   return true;
307 }
308 
309 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
310                               const MachineInstr *MI) {
311   for (auto Candidate : FoldList) {
312     if (Candidate.UseMI == MI)
313       return true;
314   }
315   return false;
316 }
317 
318 static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
319                                 MachineInstr *MI, unsigned OpNo,
320                                 MachineOperand *FoldOp, bool Commuted = false,
321                                 int ShrinkOp = -1) {
322   // Skip additional folding on the same operand.
323   for (FoldCandidate &Fold : FoldList)
324     if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
325       return;
326   LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
327                     << " operand " << OpNo << "\n  " << *MI);
328   FoldList.emplace_back(MI, OpNo, FoldOp, Commuted, ShrinkOp);
329 }
330 
331 static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
332                              MachineInstr *MI, unsigned OpNo,
333                              MachineOperand *OpToFold,
334                              const SIInstrInfo *TII) {
335   if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
336     // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
337     unsigned Opc = MI->getOpcode();
338     unsigned NewOpc = macToMad(Opc);
339     if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) {
340       // Check if changing this to a v_mad_{f16, f32} instruction will allow us
341       // to fold the operand.
342       MI->setDesc(TII->get(NewOpc));
343       bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
344       if (FoldAsMAD) {
345         MI->untieRegOperand(OpNo);
346         return true;
347       }
348       MI->setDesc(TII->get(Opc));
349     }
350 
351     // Special case for s_setreg_b32
352     if (OpToFold->isImm()) {
353       unsigned ImmOpc = 0;
354       if (Opc == AMDGPU::S_SETREG_B32)
355         ImmOpc = AMDGPU::S_SETREG_IMM32_B32;
356       else if (Opc == AMDGPU::S_SETREG_B32_mode)
357         ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode;
358       if (ImmOpc) {
359         MI->setDesc(TII->get(ImmOpc));
360         appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
361         return true;
362       }
363     }
364 
365     // If we are already folding into another operand of MI, then
366     // we can't commute the instruction, otherwise we risk making the
367     // other fold illegal.
368     if (isUseMIInFoldList(FoldList, MI))
369       return false;
370 
371     unsigned CommuteOpNo = OpNo;
372 
373     // Operand is not legal, so try to commute the instruction to
374     // see if this makes it possible to fold.
375     unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
376     unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
377     bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
378 
379     if (CanCommute) {
380       if (CommuteIdx0 == OpNo)
381         CommuteOpNo = CommuteIdx1;
382       else if (CommuteIdx1 == OpNo)
383         CommuteOpNo = CommuteIdx0;
384     }
385 
386 
387     // One of operands might be an Imm operand, and OpNo may refer to it after
388     // the call of commuteInstruction() below. Such situations are avoided
389     // here explicitly as OpNo must be a register operand to be a candidate
390     // for memory folding.
391     if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
392                        !MI->getOperand(CommuteIdx1).isReg()))
393       return false;
394 
395     if (!CanCommute ||
396         !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
397       return false;
398 
399     if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
400       if ((Opc == AMDGPU::V_ADD_CO_U32_e64 ||
401            Opc == AMDGPU::V_SUB_CO_U32_e64 ||
402            Opc == AMDGPU::V_SUBREV_CO_U32_e64) && // FIXME
403           (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
404         MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
405 
406         // Verify the other operand is a VGPR, otherwise we would violate the
407         // constant bus restriction.
408         unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
409         MachineOperand &OtherOp = MI->getOperand(OtherIdx);
410         if (!OtherOp.isReg() ||
411             !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
412           return false;
413 
414         assert(MI->getOperand(1).isDef());
415 
416         // Make sure to get the 32-bit version of the commuted opcode.
417         unsigned MaybeCommutedOpc = MI->getOpcode();
418         int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);
419 
420         appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
421         return true;
422       }
423 
424       TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
425       return false;
426     }
427 
428     appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
429     return true;
430   }
431 
432   // Check the case where we might introduce a second constant operand to a
433   // scalar instruction
434   if (TII->isSALU(MI->getOpcode())) {
435     const MCInstrDesc &InstDesc = MI->getDesc();
436     const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
437     const SIRegisterInfo &SRI = TII->getRegisterInfo();
438 
439     // Fine if the operand can be encoded as an inline constant
440     if (TII->isLiteralConstantLike(*OpToFold, OpInfo)) {
441       if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
442           !TII->isInlineConstant(*OpToFold, OpInfo)) {
443         // Otherwise check for another constant
444         for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
445           auto &Op = MI->getOperand(i);
446           if (OpNo != i &&
447               TII->isLiteralConstantLike(Op, OpInfo)) {
448             return false;
449           }
450         }
451       }
452     }
453   }
454 
455   appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
456   return true;
457 }
458 
459 // If the use operand doesn't care about the value, this may be an operand only
460 // used for register indexing, in which case it is unsafe to fold.
461 static bool isUseSafeToFold(const SIInstrInfo *TII,
462                             const MachineInstr &MI,
463                             const MachineOperand &UseMO) {
464   if (UseMO.isUndef() || TII->isSDWA(MI))
465     return false;
466 
467   switch (MI.getOpcode()) {
468   case AMDGPU::V_MOV_B32_e32:
469   case AMDGPU::V_MOV_B32_e64:
470   case AMDGPU::V_MOV_B64_PSEUDO:
471   case AMDGPU::V_MOV_B64_e32:
472   case AMDGPU::V_MOV_B64_e64:
473     // Do not fold into an indirect mov.
474     return !MI.hasRegisterImplicitUseOperand(AMDGPU::M0);
475   }
476 
477   return true;
478   //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
479 }
480 
481 // Find a def of the UseReg, check if it is a reg_sequence and find initializers
482 // for each subreg, tracking it to foldable inline immediate if possible.
483 // Returns true on success.
484 static bool getRegSeqInit(
485     SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
486     Register UseReg, uint8_t OpTy,
487     const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
488   MachineInstr *Def = MRI.getVRegDef(UseReg);
489   if (!Def || !Def->isRegSequence())
490     return false;
491 
492   for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
493     MachineOperand *Sub = &Def->getOperand(I);
494     assert(Sub->isReg());
495 
496     for (MachineInstr *SubDef = MRI.getVRegDef(Sub->getReg());
497          SubDef && Sub->isReg() && Sub->getReg().isVirtual() &&
498          !Sub->getSubReg() && TII->isFoldableCopy(*SubDef);
499          SubDef = MRI.getVRegDef(Sub->getReg())) {
500       MachineOperand *Op = &SubDef->getOperand(1);
501       if (Op->isImm()) {
502         if (TII->isInlineConstant(*Op, OpTy))
503           Sub = Op;
504         break;
505       }
506       if (!Op->isReg() || Op->getReg().isPhysical())
507         break;
508       Sub = Op;
509     }
510 
511     Defs.emplace_back(Sub, Def->getOperand(I + 1).getImm());
512   }
513 
514   return true;
515 }
516 
517 static bool tryToFoldACImm(const SIInstrInfo *TII,
518                            const MachineOperand &OpToFold,
519                            MachineInstr *UseMI,
520                            unsigned UseOpIdx,
521                            SmallVectorImpl<FoldCandidate> &FoldList) {
522   const MCInstrDesc &Desc = UseMI->getDesc();
523   const MCOperandInfo *OpInfo = Desc.OpInfo;
524   if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
525     return false;
526 
527   uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
528   if ((OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
529        OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST) &&
530       (OpTy < AMDGPU::OPERAND_REG_INLINE_C_FIRST ||
531        OpTy > AMDGPU::OPERAND_REG_INLINE_C_LAST))
532     return false;
533 
534   if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
535       TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
536     UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
537     return true;
538   }
539 
540   if (!OpToFold.isReg())
541     return false;
542 
543   Register UseReg = OpToFold.getReg();
544   if (!UseReg.isVirtual())
545     return false;
546 
547   if (isUseMIInFoldList(FoldList, UseMI))
548     return false;
549 
550   MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
551 
552   // Maybe it is just a COPY of an immediate itself.
553   MachineInstr *Def = MRI.getVRegDef(UseReg);
554   MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
555   if (!UseOp.getSubReg() && Def && TII->isFoldableCopy(*Def)) {
556     MachineOperand &DefOp = Def->getOperand(1);
557     if (DefOp.isImm() && TII->isInlineConstant(DefOp, OpTy) &&
558         TII->isOperandLegal(*UseMI, UseOpIdx, &DefOp)) {
559       UseMI->getOperand(UseOpIdx).ChangeToImmediate(DefOp.getImm());
560       return true;
561     }
562   }
563 
564   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
565   if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
566     return false;
567 
568   int32_t Imm;
569   for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
570     const MachineOperand *Op = Defs[I].first;
571     if (!Op->isImm())
572       return false;
573 
574     auto SubImm = Op->getImm();
575     if (!I) {
576       Imm = SubImm;
577       if (!TII->isInlineConstant(*Op, OpTy) ||
578           !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
579         return false;
580 
581       continue;
582     }
583     if (Imm != SubImm)
584       return false; // Can only fold splat constants
585   }
586 
587   appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
588   return true;
589 }
590 
591 void SIFoldOperands::foldOperand(
592   MachineOperand &OpToFold,
593   MachineInstr *UseMI,
594   int UseOpIdx,
595   SmallVectorImpl<FoldCandidate> &FoldList,
596   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
597   const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
598 
599   if (!isUseSafeToFold(TII, *UseMI, UseOp))
600     return;
601 
602   // FIXME: Fold operands with subregs.
603   if (UseOp.isReg() && OpToFold.isReg()) {
604     if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
605       return;
606   }
607 
608   // Special case for REG_SEQUENCE: We can't fold literals into
609   // REG_SEQUENCE instructions, so we have to fold them into the
610   // uses of REG_SEQUENCE.
611   if (UseMI->isRegSequence()) {
612     Register RegSeqDstReg = UseMI->getOperand(0).getReg();
613     unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
614 
615     for (auto &RSUse : make_early_inc_range(MRI->use_nodbg_operands(RegSeqDstReg))) {
616       MachineInstr *RSUseMI = RSUse.getParent();
617 
618       if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
619                          RSUseMI->getOperandNo(&RSUse), FoldList))
620         continue;
621 
622       if (RSUse.getSubReg() != RegSeqDstSubReg)
623         continue;
624 
625       foldOperand(OpToFold, RSUseMI, RSUseMI->getOperandNo(&RSUse), FoldList,
626                   CopiesToReplace);
627     }
628 
629     return;
630   }
631 
632   if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
633     return;
634 
635   if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
636     // Verify that this is a stack access.
637     // FIXME: Should probably use stack pseudos before frame lowering.
638 
639     if (TII->isMUBUF(*UseMI)) {
640       if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
641           MFI->getScratchRSrcReg())
642         return;
643 
644       // Ensure this is either relative to the current frame or the current
645       // wave.
646       MachineOperand &SOff =
647           *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
648       if (!SOff.isImm() || SOff.getImm() != 0)
649         return;
650     }
651 
652     // A frame index will resolve to a positive constant, so it should always be
653     // safe to fold the addressing mode, even pre-GFX9.
654     UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
655 
656     if (TII->isFLATScratch(*UseMI) &&
657         AMDGPU::getNamedOperandIdx(UseMI->getOpcode(),
658                                    AMDGPU::OpName::vaddr) != -1 &&
659         AMDGPU::getNamedOperandIdx(UseMI->getOpcode(),
660                                    AMDGPU::OpName::saddr) == -1) {
661       unsigned NewOpc = AMDGPU::getFlatScratchInstSSfromSV(UseMI->getOpcode());
662       UseMI->setDesc(TII->get(NewOpc));
663     }
664 
665     return;
666   }
667 
668   bool FoldingImmLike =
669       OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
670 
671   if (FoldingImmLike && UseMI->isCopy()) {
672     Register DestReg = UseMI->getOperand(0).getReg();
673     Register SrcReg = UseMI->getOperand(1).getReg();
674     assert(SrcReg.isVirtual());
675 
676     const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
677 
678     // Don't fold into a copy to a physical register with the same class. Doing
679     // so would interfere with the register coalescer's logic which would avoid
680     // redundant initializations.
681     if (DestReg.isPhysical() && SrcRC->contains(DestReg))
682       return;
683 
684     const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
685     if (!DestReg.isPhysical()) {
686       if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
687         SmallVector<FoldCandidate, 4> CopyUses;
688         for (auto &Use : MRI->use_nodbg_operands(DestReg)) {
689           // There's no point trying to fold into an implicit operand.
690           if (Use.isImplicit())
691             continue;
692 
693           CopyUses.emplace_back(Use.getParent(),
694                                 Use.getParent()->getOperandNo(&Use),
695                                 &UseMI->getOperand(1));
696         }
697         for (auto &F : CopyUses) {
698           foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo, FoldList, CopiesToReplace);
699         }
700       }
701 
702       if (DestRC == &AMDGPU::AGPR_32RegClass &&
703           TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
704         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
705         UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
706         CopiesToReplace.push_back(UseMI);
707         return;
708       }
709     }
710 
711     // In order to fold immediates into copies, we need to change the
712     // copy to a MOV.
713 
714     unsigned MovOp = TII->getMovOpcode(DestRC);
715     if (MovOp == AMDGPU::COPY)
716       return;
717 
718     UseMI->setDesc(TII->get(MovOp));
719     MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
720     MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
721     while (ImpOpI != ImpOpE) {
722       MachineInstr::mop_iterator Tmp = ImpOpI;
723       ImpOpI++;
724       UseMI->removeOperand(UseMI->getOperandNo(Tmp));
725     }
726     CopiesToReplace.push_back(UseMI);
727   } else {
728     if (UseMI->isCopy() && OpToFold.isReg() &&
729         UseMI->getOperand(0).getReg().isVirtual() &&
730         !UseMI->getOperand(1).getSubReg()) {
731       LLVM_DEBUG(dbgs() << "Folding " << OpToFold << "\n into " << *UseMI);
732       unsigned Size = TII->getOpSize(*UseMI, 1);
733       Register UseReg = OpToFold.getReg();
734       UseMI->getOperand(1).setReg(UseReg);
735       UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
736       UseMI->getOperand(1).setIsKill(false);
737       CopiesToReplace.push_back(UseMI);
738       OpToFold.setIsKill(false);
739 
740       // That is very tricky to store a value into an AGPR. v_accvgpr_write_b32
741       // can only accept VGPR or inline immediate. Recreate a reg_sequence with
742       // its initializers right here, so we will rematerialize immediates and
743       // avoid copies via different reg classes.
744       SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
745       if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
746           getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
747                         *MRI)) {
748         const DebugLoc &DL = UseMI->getDebugLoc();
749         MachineBasicBlock &MBB = *UseMI->getParent();
750 
751         UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
752         for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
753           UseMI->removeOperand(I);
754 
755         MachineInstrBuilder B(*MBB.getParent(), UseMI);
756         DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
757         SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
758         for (unsigned I = 0; I < Size / 4; ++I) {
759           MachineOperand *Def = Defs[I].first;
760           TargetInstrInfo::RegSubRegPair CopyToVGPR;
761           if (Def->isImm() &&
762               TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
763             int64_t Imm = Def->getImm();
764 
765             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
766             BuildMI(MBB, UseMI, DL,
767                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addImm(Imm);
768             B.addReg(Tmp);
769           } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
770             auto Src = getRegSubRegPair(*Def);
771             Def->setIsKill(false);
772             if (!SeenAGPRs.insert(Src)) {
773               // We cannot build a reg_sequence out of the same registers, they
774               // must be copied. Better do it here before copyPhysReg() created
775               // several reads to do the AGPR->VGPR->AGPR copy.
776               CopyToVGPR = Src;
777             } else {
778               B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
779                        Src.SubReg);
780             }
781           } else {
782             assert(Def->isReg());
783             Def->setIsKill(false);
784             auto Src = getRegSubRegPair(*Def);
785 
786             // Direct copy from SGPR to AGPR is not possible. To avoid creation
787             // of exploded copies SGPR->VGPR->AGPR in the copyPhysReg() later,
788             // create a copy here and track if we already have such a copy.
789             if (TRI->isSGPRReg(*MRI, Src.Reg)) {
790               CopyToVGPR = Src;
791             } else {
792               auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
793               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
794               B.addReg(Tmp);
795             }
796           }
797 
798           if (CopyToVGPR.Reg) {
799             Register Vgpr;
800             if (VGPRCopies.count(CopyToVGPR)) {
801               Vgpr = VGPRCopies[CopyToVGPR];
802             } else {
803               Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
804               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
805               VGPRCopies[CopyToVGPR] = Vgpr;
806             }
807             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
808             BuildMI(MBB, UseMI, DL,
809                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addReg(Vgpr);
810             B.addReg(Tmp);
811           }
812 
813           B.addImm(Defs[I].second);
814         }
815         LLVM_DEBUG(dbgs() << "Folded " << *UseMI);
816         return;
817       }
818 
819       if (Size != 4)
820         return;
821       if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
822           TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
823         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
824       else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
825                TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
826         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64));
827       else if (ST->hasGFX90AInsts() &&
828                TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
829                TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
830         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_MOV_B32));
831       return;
832     }
833 
834     unsigned UseOpc = UseMI->getOpcode();
835     if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
836         (UseOpc == AMDGPU::V_READLANE_B32 &&
837          (int)UseOpIdx ==
838          AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
839       // %vgpr = V_MOV_B32 imm
840       // %sgpr = V_READFIRSTLANE_B32 %vgpr
841       // =>
842       // %sgpr = S_MOV_B32 imm
843       if (FoldingImmLike) {
844         if (execMayBeModifiedBeforeUse(*MRI,
845                                        UseMI->getOperand(UseOpIdx).getReg(),
846                                        *OpToFold.getParent(),
847                                        *UseMI))
848           return;
849 
850         UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
851 
852         if (OpToFold.isImm())
853           UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
854         else
855           UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
856         UseMI->removeOperand(2); // Remove exec read (or src1 for readlane)
857         return;
858       }
859 
860       if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
861         if (execMayBeModifiedBeforeUse(*MRI,
862                                        UseMI->getOperand(UseOpIdx).getReg(),
863                                        *OpToFold.getParent(),
864                                        *UseMI))
865           return;
866 
867         // %vgpr = COPY %sgpr0
868         // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
869         // =>
870         // %sgpr1 = COPY %sgpr0
871         UseMI->setDesc(TII->get(AMDGPU::COPY));
872         UseMI->getOperand(1).setReg(OpToFold.getReg());
873         UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
874         UseMI->getOperand(1).setIsKill(false);
875         UseMI->removeOperand(2); // Remove exec read (or src1 for readlane)
876         return;
877       }
878     }
879 
880     const MCInstrDesc &UseDesc = UseMI->getDesc();
881 
882     // Don't fold into target independent nodes.  Target independent opcodes
883     // don't have defined register classes.
884     if (UseDesc.isVariadic() ||
885         UseOp.isImplicit() ||
886         UseDesc.OpInfo[UseOpIdx].RegClass == -1)
887       return;
888   }
889 
890   if (!FoldingImmLike) {
891     if (OpToFold.isReg() && ST->needsAlignedVGPRs()) {
892       // Don't fold if OpToFold doesn't hold an aligned register.
893       const TargetRegisterClass *RC =
894           TRI->getRegClassForReg(*MRI, OpToFold.getReg());
895       if (TRI->hasVectorRegisters(RC) && OpToFold.getSubReg()) {
896         unsigned SubReg = OpToFold.getSubReg();
897         const TargetRegisterClass *SubRC = TRI->getSubRegClass(RC, SubReg);
898         RC = TRI->getCompatibleSubRegClass(RC, SubRC, SubReg);
899         if (RC)
900           RC = SubRC;
901       }
902 
903       if (!RC || !TRI->isProperlyAlignedRC(*RC))
904         return;
905     }
906 
907     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
908 
909     // FIXME: We could try to change the instruction from 64-bit to 32-bit
910     // to enable more folding opportunities.  The shrink operands pass
911     // already does this.
912     return;
913   }
914 
915 
916   const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
917   const TargetRegisterClass *FoldRC =
918     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
919 
920   // Split 64-bit constants into 32-bits for folding.
921   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
922     Register UseReg = UseOp.getReg();
923     const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
924 
925     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
926       return;
927 
928     APInt Imm(64, OpToFold.getImm());
929     if (UseOp.getSubReg() == AMDGPU::sub0) {
930       Imm = Imm.getLoBits(32);
931     } else {
932       assert(UseOp.getSubReg() == AMDGPU::sub1);
933       Imm = Imm.getHiBits(32);
934     }
935 
936     MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
937     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
938     return;
939   }
940 
941 
942 
943   tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
944 }
945 
946 static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
947                                   uint32_t LHS, uint32_t RHS) {
948   switch (Opcode) {
949   case AMDGPU::V_AND_B32_e64:
950   case AMDGPU::V_AND_B32_e32:
951   case AMDGPU::S_AND_B32:
952     Result = LHS & RHS;
953     return true;
954   case AMDGPU::V_OR_B32_e64:
955   case AMDGPU::V_OR_B32_e32:
956   case AMDGPU::S_OR_B32:
957     Result = LHS | RHS;
958     return true;
959   case AMDGPU::V_XOR_B32_e64:
960   case AMDGPU::V_XOR_B32_e32:
961   case AMDGPU::S_XOR_B32:
962     Result = LHS ^ RHS;
963     return true;
964   case AMDGPU::S_XNOR_B32:
965     Result = ~(LHS ^ RHS);
966     return true;
967   case AMDGPU::S_NAND_B32:
968     Result = ~(LHS & RHS);
969     return true;
970   case AMDGPU::S_NOR_B32:
971     Result = ~(LHS | RHS);
972     return true;
973   case AMDGPU::S_ANDN2_B32:
974     Result = LHS & ~RHS;
975     return true;
976   case AMDGPU::S_ORN2_B32:
977     Result = LHS | ~RHS;
978     return true;
979   case AMDGPU::V_LSHL_B32_e64:
980   case AMDGPU::V_LSHL_B32_e32:
981   case AMDGPU::S_LSHL_B32:
982     // The instruction ignores the high bits for out of bounds shifts.
983     Result = LHS << (RHS & 31);
984     return true;
985   case AMDGPU::V_LSHLREV_B32_e64:
986   case AMDGPU::V_LSHLREV_B32_e32:
987     Result = RHS << (LHS & 31);
988     return true;
989   case AMDGPU::V_LSHR_B32_e64:
990   case AMDGPU::V_LSHR_B32_e32:
991   case AMDGPU::S_LSHR_B32:
992     Result = LHS >> (RHS & 31);
993     return true;
994   case AMDGPU::V_LSHRREV_B32_e64:
995   case AMDGPU::V_LSHRREV_B32_e32:
996     Result = RHS >> (LHS & 31);
997     return true;
998   case AMDGPU::V_ASHR_I32_e64:
999   case AMDGPU::V_ASHR_I32_e32:
1000   case AMDGPU::S_ASHR_I32:
1001     Result = static_cast<int32_t>(LHS) >> (RHS & 31);
1002     return true;
1003   case AMDGPU::V_ASHRREV_I32_e64:
1004   case AMDGPU::V_ASHRREV_I32_e32:
1005     Result = static_cast<int32_t>(RHS) >> (LHS & 31);
1006     return true;
1007   default:
1008     return false;
1009   }
1010 }
1011 
1012 static unsigned getMovOpc(bool IsScalar) {
1013   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1014 }
1015 
1016 /// Remove any leftover implicit operands from mutating the instruction. e.g.
1017 /// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
1018 /// anymore.
1019 static void stripExtraCopyOperands(MachineInstr &MI) {
1020   const MCInstrDesc &Desc = MI.getDesc();
1021   unsigned NumOps = Desc.getNumOperands() +
1022                     Desc.getNumImplicitUses() +
1023                     Desc.getNumImplicitDefs();
1024 
1025   for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
1026     MI.removeOperand(I);
1027 }
1028 
1029 static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
1030   MI.setDesc(NewDesc);
1031   stripExtraCopyOperands(MI);
1032 }
1033 
1034 static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
1035                                                MachineOperand &Op) {
1036   if (Op.isReg()) {
1037     // If this has a subregister, it obviously is a register source.
1038     if (Op.getSubReg() != AMDGPU::NoSubRegister || !Op.getReg().isVirtual())
1039       return &Op;
1040 
1041     MachineInstr *Def = MRI.getVRegDef(Op.getReg());
1042     if (Def && Def->isMoveImmediate()) {
1043       MachineOperand &ImmSrc = Def->getOperand(1);
1044       if (ImmSrc.isImm())
1045         return &ImmSrc;
1046     }
1047   }
1048 
1049   return &Op;
1050 }
1051 
1052 // Try to simplify operations with a constant that may appear after instruction
1053 // selection.
1054 // TODO: See if a frame index with a fixed offset can fold.
1055 static bool tryConstantFoldOp(MachineRegisterInfo &MRI, const SIInstrInfo *TII,
1056                               MachineInstr *MI) {
1057   unsigned Opc = MI->getOpcode();
1058 
1059   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1060   if (Src0Idx == -1)
1061     return false;
1062   MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
1063 
1064   if ((Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
1065        Opc == AMDGPU::S_NOT_B32) &&
1066       Src0->isImm()) {
1067     MI->getOperand(1).ChangeToImmediate(~Src0->getImm());
1068     mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
1069     return true;
1070   }
1071 
1072   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1073   if (Src1Idx == -1)
1074     return false;
1075   MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));
1076 
1077   if (!Src0->isImm() && !Src1->isImm())
1078     return false;
1079 
1080   // and k0, k1 -> v_mov_b32 (k0 & k1)
1081   // or k0, k1 -> v_mov_b32 (k0 | k1)
1082   // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
1083   if (Src0->isImm() && Src1->isImm()) {
1084     int32_t NewImm;
1085     if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
1086       return false;
1087 
1088     const SIRegisterInfo &TRI = TII->getRegisterInfo();
1089     bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());
1090 
1091     // Be careful to change the right operand, src0 may belong to a different
1092     // instruction.
1093     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
1094     MI->removeOperand(Src1Idx);
1095     mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
1096     return true;
1097   }
1098 
1099   if (!MI->isCommutable())
1100     return false;
1101 
1102   if (Src0->isImm() && !Src1->isImm()) {
1103     std::swap(Src0, Src1);
1104     std::swap(Src0Idx, Src1Idx);
1105   }
1106 
1107   int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
1108   if (Opc == AMDGPU::V_OR_B32_e64 ||
1109       Opc == AMDGPU::V_OR_B32_e32 ||
1110       Opc == AMDGPU::S_OR_B32) {
1111     if (Src1Val == 0) {
1112       // y = or x, 0 => y = copy x
1113       MI->removeOperand(Src1Idx);
1114       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1115     } else if (Src1Val == -1) {
1116       // y = or x, -1 => y = v_mov_b32 -1
1117       MI->removeOperand(Src1Idx);
1118       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
1119     } else
1120       return false;
1121 
1122     return true;
1123   }
1124 
1125   if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
1126       MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
1127       MI->getOpcode() == AMDGPU::S_AND_B32) {
1128     if (Src1Val == 0) {
1129       // y = and x, 0 => y = v_mov_b32 0
1130       MI->removeOperand(Src0Idx);
1131       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
1132     } else if (Src1Val == -1) {
1133       // y = and x, -1 => y = copy x
1134       MI->removeOperand(Src1Idx);
1135       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1136       stripExtraCopyOperands(*MI);
1137     } else
1138       return false;
1139 
1140     return true;
1141   }
1142 
1143   if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
1144       MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
1145       MI->getOpcode() == AMDGPU::S_XOR_B32) {
1146     if (Src1Val == 0) {
1147       // y = xor x, 0 => y = copy x
1148       MI->removeOperand(Src1Idx);
1149       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1150       return true;
1151     }
1152   }
1153 
1154   return false;
1155 }
1156 
1157 // Try to fold an instruction into a simpler one
1158 bool SIFoldOperands::tryFoldCndMask(MachineInstr &MI) const {
1159   unsigned Opc = MI.getOpcode();
1160   if (Opc != AMDGPU::V_CNDMASK_B32_e32 && Opc != AMDGPU::V_CNDMASK_B32_e64 &&
1161       Opc != AMDGPU::V_CNDMASK_B64_PSEUDO)
1162     return false;
1163 
1164   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1165   MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1166   if (!Src1->isIdenticalTo(*Src0)) {
1167     auto *Src0Imm = getImmOrMaterializedImm(*MRI, *Src0);
1168     auto *Src1Imm = getImmOrMaterializedImm(*MRI, *Src1);
1169     if (!Src1Imm->isIdenticalTo(*Src0Imm))
1170       return false;
1171   }
1172 
1173   int Src1ModIdx =
1174       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
1175   int Src0ModIdx =
1176       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
1177   if ((Src1ModIdx != -1 && MI.getOperand(Src1ModIdx).getImm() != 0) ||
1178       (Src0ModIdx != -1 && MI.getOperand(Src0ModIdx).getImm() != 0))
1179     return false;
1180 
1181   LLVM_DEBUG(dbgs() << "Folded " << MI << " into ");
1182   auto &NewDesc =
1183       TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
1184   int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
1185   if (Src2Idx != -1)
1186     MI.removeOperand(Src2Idx);
1187   MI.removeOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
1188   if (Src1ModIdx != -1)
1189     MI.removeOperand(Src1ModIdx);
1190   if (Src0ModIdx != -1)
1191     MI.removeOperand(Src0ModIdx);
1192   mutateCopyOp(MI, NewDesc);
1193   LLVM_DEBUG(dbgs() << MI);
1194   return true;
1195 }
1196 
1197 bool SIFoldOperands::tryFoldZeroHighBits(MachineInstr &MI) const {
1198   if (MI.getOpcode() != AMDGPU::V_AND_B32_e64 &&
1199       MI.getOpcode() != AMDGPU::V_AND_B32_e32)
1200     return false;
1201 
1202   MachineOperand *Src0 = getImmOrMaterializedImm(*MRI, MI.getOperand(1));
1203   if (!Src0->isImm() || Src0->getImm() != 0xffff)
1204     return false;
1205 
1206   Register Src1 = MI.getOperand(2).getReg();
1207   MachineInstr *SrcDef = MRI->getVRegDef(Src1);
1208   if (ST->zeroesHigh16BitsOfDest(SrcDef->getOpcode())) {
1209     Register Dst = MI.getOperand(0).getReg();
1210     MRI->replaceRegWith(Dst, SrcDef->getOperand(0).getReg());
1211     MI.eraseFromParent();
1212     return true;
1213   }
1214 
1215   return false;
1216 }
1217 
1218 bool SIFoldOperands::foldInstOperand(MachineInstr &MI,
1219                                      MachineOperand &OpToFold) const {
1220   // We need mutate the operands of new mov instructions to add implicit
1221   // uses of EXEC, but adding them invalidates the use_iterator, so defer
1222   // this.
1223   SmallVector<MachineInstr *, 4> CopiesToReplace;
1224   SmallVector<FoldCandidate, 4> FoldList;
1225   MachineOperand &Dst = MI.getOperand(0);
1226   bool Changed = false;
1227 
1228   if (OpToFold.isImm()) {
1229     for (auto &UseMI :
1230          make_early_inc_range(MRI->use_nodbg_instructions(Dst.getReg()))) {
1231       // Folding the immediate may reveal operations that can be constant
1232       // folded or replaced with a copy. This can happen for example after
1233       // frame indices are lowered to constants or from splitting 64-bit
1234       // constants.
1235       //
1236       // We may also encounter cases where one or both operands are
1237       // immediates materialized into a register, which would ordinarily not
1238       // be folded due to multiple uses or operand constraints.
1239       if (tryConstantFoldOp(*MRI, TII, &UseMI)) {
1240         LLVM_DEBUG(dbgs() << "Constant folded " << UseMI);
1241         Changed = true;
1242       }
1243     }
1244   }
1245 
1246   SmallVector<MachineOperand *, 4> UsesToProcess;
1247   for (auto &Use : MRI->use_nodbg_operands(Dst.getReg()))
1248     UsesToProcess.push_back(&Use);
1249   for (auto U : UsesToProcess) {
1250     MachineInstr *UseMI = U->getParent();
1251     foldOperand(OpToFold, UseMI, UseMI->getOperandNo(U), FoldList,
1252                 CopiesToReplace);
1253   }
1254 
1255   if (CopiesToReplace.empty() && FoldList.empty())
1256     return Changed;
1257 
1258   MachineFunction *MF = MI.getParent()->getParent();
1259   // Make sure we add EXEC uses to any new v_mov instructions created.
1260   for (MachineInstr *Copy : CopiesToReplace)
1261     Copy->addImplicitDefUseOperands(*MF);
1262 
1263   for (FoldCandidate &Fold : FoldList) {
1264     assert(!Fold.isReg() || Fold.OpToFold);
1265     if (Fold.isReg() && Fold.OpToFold->getReg().isVirtual()) {
1266       Register Reg = Fold.OpToFold->getReg();
1267       MachineInstr *DefMI = Fold.OpToFold->getParent();
1268       if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
1269           execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
1270         continue;
1271     }
1272     if (updateOperand(Fold, *TII, *TRI, *ST)) {
1273       // Clear kill flags.
1274       if (Fold.isReg()) {
1275         assert(Fold.OpToFold && Fold.OpToFold->isReg());
1276         // FIXME: Probably shouldn't bother trying to fold if not an
1277         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
1278         // copies.
1279         MRI->clearKillFlags(Fold.OpToFold->getReg());
1280       }
1281       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
1282                         << static_cast<int>(Fold.UseOpNo) << " of "
1283                         << *Fold.UseMI);
1284     } else if (Fold.isCommuted()) {
1285       // Restoring instruction's original operand order if fold has failed.
1286       TII->commuteInstruction(*Fold.UseMI, false);
1287     }
1288   }
1289   return true;
1290 }
1291 
1292 // Clamp patterns are canonically selected to v_max_* instructions, so only
1293 // handle them.
1294 const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
1295   unsigned Op = MI.getOpcode();
1296   switch (Op) {
1297   case AMDGPU::V_MAX_F32_e64:
1298   case AMDGPU::V_MAX_F16_e64:
1299   case AMDGPU::V_MAX_F64_e64:
1300   case AMDGPU::V_PK_MAX_F16: {
1301     if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
1302       return nullptr;
1303 
1304     // Make sure sources are identical.
1305     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1306     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1307     if (!Src0->isReg() || !Src1->isReg() ||
1308         Src0->getReg() != Src1->getReg() ||
1309         Src0->getSubReg() != Src1->getSubReg() ||
1310         Src0->getSubReg() != AMDGPU::NoSubRegister)
1311       return nullptr;
1312 
1313     // Can't fold up if we have modifiers.
1314     if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1315       return nullptr;
1316 
1317     unsigned Src0Mods
1318       = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
1319     unsigned Src1Mods
1320       = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();
1321 
1322     // Having a 0 op_sel_hi would require swizzling the output in the source
1323     // instruction, which we can't do.
1324     unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
1325                                                       : 0u;
1326     if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
1327       return nullptr;
1328     return Src0;
1329   }
1330   default:
1331     return nullptr;
1332   }
1333 }
1334 
1335 // FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
1336 bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
1337   const MachineOperand *ClampSrc = isClamp(MI);
1338   if (!ClampSrc || !MRI->hasOneNonDBGUser(ClampSrc->getReg()))
1339     return false;
1340 
1341   MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
1342 
1343   // The type of clamp must be compatible.
1344   if (TII->getClampMask(*Def) != TII->getClampMask(MI))
1345     return false;
1346 
1347   MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
1348   if (!DefClamp)
1349     return false;
1350 
1351   LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def);
1352 
1353   // Clamp is applied after omod, so it is OK if omod is set.
1354   DefClamp->setImm(1);
1355   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1356   MI.eraseFromParent();
1357 
1358   // Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac
1359   // instruction, so we might as well convert it to the more flexible VOP3-only
1360   // mad/fma form.
1361   if (TII->convertToThreeAddress(*Def, nullptr, nullptr))
1362     Def->eraseFromParent();
1363 
1364   return true;
1365 }
1366 
1367 static int getOModValue(unsigned Opc, int64_t Val) {
1368   switch (Opc) {
1369   case AMDGPU::V_MUL_F64_e64: {
1370     switch (Val) {
1371     case 0x3fe0000000000000: // 0.5
1372       return SIOutMods::DIV2;
1373     case 0x4000000000000000: // 2.0
1374       return SIOutMods::MUL2;
1375     case 0x4010000000000000: // 4.0
1376       return SIOutMods::MUL4;
1377     default:
1378       return SIOutMods::NONE;
1379     }
1380   }
1381   case AMDGPU::V_MUL_F32_e64: {
1382     switch (static_cast<uint32_t>(Val)) {
1383     case 0x3f000000: // 0.5
1384       return SIOutMods::DIV2;
1385     case 0x40000000: // 2.0
1386       return SIOutMods::MUL2;
1387     case 0x40800000: // 4.0
1388       return SIOutMods::MUL4;
1389     default:
1390       return SIOutMods::NONE;
1391     }
1392   }
1393   case AMDGPU::V_MUL_F16_e64: {
1394     switch (static_cast<uint16_t>(Val)) {
1395     case 0x3800: // 0.5
1396       return SIOutMods::DIV2;
1397     case 0x4000: // 2.0
1398       return SIOutMods::MUL2;
1399     case 0x4400: // 4.0
1400       return SIOutMods::MUL4;
1401     default:
1402       return SIOutMods::NONE;
1403     }
1404   }
1405   default:
1406     llvm_unreachable("invalid mul opcode");
1407   }
1408 }
1409 
1410 // FIXME: Does this really not support denormals with f16?
1411 // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
1412 // handled, so will anything other than that break?
1413 std::pair<const MachineOperand *, int>
1414 SIFoldOperands::isOMod(const MachineInstr &MI) const {
1415   unsigned Op = MI.getOpcode();
1416   switch (Op) {
1417   case AMDGPU::V_MUL_F64_e64:
1418   case AMDGPU::V_MUL_F32_e64:
1419   case AMDGPU::V_MUL_F16_e64: {
1420     // If output denormals are enabled, omod is ignored.
1421     if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
1422         ((Op == AMDGPU::V_MUL_F64_e64 || Op == AMDGPU::V_MUL_F16_e64) &&
1423          MFI->getMode().FP64FP16OutputDenormals))
1424       return std::make_pair(nullptr, SIOutMods::NONE);
1425 
1426     const MachineOperand *RegOp = nullptr;
1427     const MachineOperand *ImmOp = nullptr;
1428     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1429     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1430     if (Src0->isImm()) {
1431       ImmOp = Src0;
1432       RegOp = Src1;
1433     } else if (Src1->isImm()) {
1434       ImmOp = Src1;
1435       RegOp = Src0;
1436     } else
1437       return std::make_pair(nullptr, SIOutMods::NONE);
1438 
1439     int OMod = getOModValue(Op, ImmOp->getImm());
1440     if (OMod == SIOutMods::NONE ||
1441         TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
1442         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
1443         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
1444         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
1445       return std::make_pair(nullptr, SIOutMods::NONE);
1446 
1447     return std::make_pair(RegOp, OMod);
1448   }
1449   case AMDGPU::V_ADD_F64_e64:
1450   case AMDGPU::V_ADD_F32_e64:
1451   case AMDGPU::V_ADD_F16_e64: {
1452     // If output denormals are enabled, omod is ignored.
1453     if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
1454         ((Op == AMDGPU::V_ADD_F64_e64 || Op == AMDGPU::V_ADD_F16_e64) &&
1455          MFI->getMode().FP64FP16OutputDenormals))
1456       return std::make_pair(nullptr, SIOutMods::NONE);
1457 
1458     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
1459     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1460     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1461 
1462     if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
1463         Src0->getSubReg() == Src1->getSubReg() &&
1464         !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
1465         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
1466         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
1467         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1468       return std::make_pair(Src0, SIOutMods::MUL2);
1469 
1470     return std::make_pair(nullptr, SIOutMods::NONE);
1471   }
1472   default:
1473     return std::make_pair(nullptr, SIOutMods::NONE);
1474   }
1475 }
1476 
1477 // FIXME: Does this need to check IEEE bit on function?
1478 bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
1479   const MachineOperand *RegOp;
1480   int OMod;
1481   std::tie(RegOp, OMod) = isOMod(MI);
1482   if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
1483       RegOp->getSubReg() != AMDGPU::NoSubRegister ||
1484       !MRI->hasOneNonDBGUser(RegOp->getReg()))
1485     return false;
1486 
1487   MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
1488   MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
1489   if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
1490     return false;
1491 
1492   // Clamp is applied after omod. If the source already has clamp set, don't
1493   // fold it.
1494   if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
1495     return false;
1496 
1497   LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def);
1498 
1499   DefOMod->setImm(OMod);
1500   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1501   MI.eraseFromParent();
1502 
1503   // Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac
1504   // instruction, so we might as well convert it to the more flexible VOP3-only
1505   // mad/fma form.
1506   if (TII->convertToThreeAddress(*Def, nullptr, nullptr))
1507     Def->eraseFromParent();
1508 
1509   return true;
1510 }
1511 
1512 // Try to fold a reg_sequence with vgpr output and agpr inputs into an
1513 // instruction which can take an agpr. So far that means a store.
1514 bool SIFoldOperands::tryFoldRegSequence(MachineInstr &MI) {
1515   assert(MI.isRegSequence());
1516   auto Reg = MI.getOperand(0).getReg();
1517 
1518   if (!ST->hasGFX90AInsts() || !TRI->isVGPR(*MRI, Reg) ||
1519       !MRI->hasOneNonDBGUse(Reg))
1520     return false;
1521 
1522   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
1523   if (!getRegSeqInit(Defs, Reg, MCOI::OPERAND_REGISTER, TII, *MRI))
1524     return false;
1525 
1526   for (auto &Def : Defs) {
1527     const auto *Op = Def.first;
1528     if (!Op->isReg())
1529       return false;
1530     if (TRI->isAGPR(*MRI, Op->getReg()))
1531       continue;
1532     // Maybe this is a COPY from AREG
1533     const MachineInstr *SubDef = MRI->getVRegDef(Op->getReg());
1534     if (!SubDef || !SubDef->isCopy() || SubDef->getOperand(1).getSubReg())
1535       return false;
1536     if (!TRI->isAGPR(*MRI, SubDef->getOperand(1).getReg()))
1537       return false;
1538   }
1539 
1540   MachineOperand *Op = &*MRI->use_nodbg_begin(Reg);
1541   MachineInstr *UseMI = Op->getParent();
1542   while (UseMI->isCopy() && !Op->getSubReg()) {
1543     Reg = UseMI->getOperand(0).getReg();
1544     if (!TRI->isVGPR(*MRI, Reg) || !MRI->hasOneNonDBGUse(Reg))
1545       return false;
1546     Op = &*MRI->use_nodbg_begin(Reg);
1547     UseMI = Op->getParent();
1548   }
1549 
1550   if (Op->getSubReg())
1551     return false;
1552 
1553   unsigned OpIdx = Op - &UseMI->getOperand(0);
1554   const MCInstrDesc &InstDesc = UseMI->getDesc();
1555   const TargetRegisterClass *OpRC =
1556       TII->getRegClass(InstDesc, OpIdx, TRI, *MI.getMF());
1557   if (!OpRC || !TRI->isVectorSuperClass(OpRC))
1558     return false;
1559 
1560   const auto *NewDstRC = TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg));
1561   auto Dst = MRI->createVirtualRegister(NewDstRC);
1562   auto RS = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1563                     TII->get(AMDGPU::REG_SEQUENCE), Dst);
1564 
1565   for (unsigned I = 0; I < Defs.size(); ++I) {
1566     MachineOperand *Def = Defs[I].first;
1567     Def->setIsKill(false);
1568     if (TRI->isAGPR(*MRI, Def->getReg())) {
1569       RS.add(*Def);
1570     } else { // This is a copy
1571       MachineInstr *SubDef = MRI->getVRegDef(Def->getReg());
1572       SubDef->getOperand(1).setIsKill(false);
1573       RS.addReg(SubDef->getOperand(1).getReg(), 0, Def->getSubReg());
1574     }
1575     RS.addImm(Defs[I].second);
1576   }
1577 
1578   Op->setReg(Dst);
1579   if (!TII->isOperandLegal(*UseMI, OpIdx, Op)) {
1580     Op->setReg(Reg);
1581     RS->eraseFromParent();
1582     return false;
1583   }
1584 
1585   LLVM_DEBUG(dbgs() << "Folded " << *RS << " into " << *UseMI);
1586 
1587   // Erase the REG_SEQUENCE eagerly, unless we followed a chain of COPY users,
1588   // in which case we can erase them all later in runOnMachineFunction.
1589   if (MRI->use_nodbg_empty(MI.getOperand(0).getReg()))
1590     MI.eraseFromParent();
1591   return true;
1592 }
1593 
1594 // Try to hoist an AGPR to VGPR copy out of the loop across a LCSSA PHI.
1595 // This should allow folding of an AGPR into a consumer which may support it.
1596 // I.e.:
1597 //
1598 // loop:                             // loop:
1599 //   %1:vreg = COPY %0:areg          // exit:
1600 // exit:                          => //   %1:areg = PHI %0:areg, %loop
1601 //   %2:vreg = PHI %1:vreg, %loop    //   %2:vreg = COPY %1:areg
1602 bool SIFoldOperands::tryFoldLCSSAPhi(MachineInstr &PHI) {
1603   assert(PHI.isPHI());
1604 
1605   if (PHI.getNumExplicitOperands() != 3) // Single input LCSSA PHI
1606     return false;
1607 
1608   Register PhiIn = PHI.getOperand(1).getReg();
1609   Register PhiOut = PHI.getOperand(0).getReg();
1610   if (PHI.getOperand(1).getSubReg() ||
1611       !TRI->isVGPR(*MRI, PhiIn) || !TRI->isVGPR(*MRI, PhiOut))
1612     return false;
1613 
1614   // A single use should not matter for correctness, but if it has another use
1615   // inside the loop we may perform copy twice in a worst case.
1616   if (!MRI->hasOneNonDBGUse(PhiIn))
1617     return false;
1618 
1619   MachineInstr *Copy = MRI->getVRegDef(PhiIn);
1620   if (!Copy || !Copy->isCopy())
1621     return false;
1622 
1623   Register CopyIn = Copy->getOperand(1).getReg();
1624   if (!TRI->isAGPR(*MRI, CopyIn) || Copy->getOperand(1).getSubReg())
1625     return false;
1626 
1627   const TargetRegisterClass *ARC = MRI->getRegClass(CopyIn);
1628   Register NewReg = MRI->createVirtualRegister(ARC);
1629   PHI.getOperand(1).setReg(CopyIn);
1630   PHI.getOperand(0).setReg(NewReg);
1631 
1632   MachineBasicBlock *MBB = PHI.getParent();
1633   BuildMI(*MBB, MBB->getFirstNonPHI(), Copy->getDebugLoc(),
1634           TII->get(AMDGPU::COPY), PhiOut)
1635     .addReg(NewReg, RegState::Kill);
1636   Copy->eraseFromParent(); // We know this copy had a single use.
1637 
1638   LLVM_DEBUG(dbgs() << "Folded " << PHI);
1639 
1640   return true;
1641 }
1642 
1643 // Attempt to convert VGPR load to an AGPR load.
1644 bool SIFoldOperands::tryFoldLoad(MachineInstr &MI) {
1645   assert(MI.mayLoad());
1646   if (!ST->hasGFX90AInsts() || MI.getNumExplicitDefs() != 1)
1647     return false;
1648 
1649   MachineOperand &Def = MI.getOperand(0);
1650   if (!Def.isDef())
1651     return false;
1652 
1653   Register DefReg = Def.getReg();
1654 
1655   if (DefReg.isPhysical() || !TRI->isVGPR(*MRI, DefReg))
1656     return false;
1657 
1658   SmallVector<const MachineInstr*, 8> Users;
1659   SmallVector<Register, 8> MoveRegs;
1660   for (const MachineInstr &I : MRI->use_nodbg_instructions(DefReg)) {
1661     Users.push_back(&I);
1662   }
1663   if (Users.empty())
1664     return false;
1665 
1666   // Check that all uses a copy to an agpr or a reg_sequence producing an agpr.
1667   while (!Users.empty()) {
1668     const MachineInstr *I = Users.pop_back_val();
1669     if (!I->isCopy() && !I->isRegSequence())
1670       return false;
1671     Register DstReg = I->getOperand(0).getReg();
1672     if (TRI->isAGPR(*MRI, DstReg))
1673       continue;
1674     MoveRegs.push_back(DstReg);
1675     for (const MachineInstr &U : MRI->use_nodbg_instructions(DstReg)) {
1676       Users.push_back(&U);
1677     }
1678   }
1679 
1680   const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
1681   MRI->setRegClass(DefReg, TRI->getEquivalentAGPRClass(RC));
1682   if (!TII->isOperandLegal(MI, 0, &Def)) {
1683     MRI->setRegClass(DefReg, RC);
1684     return false;
1685   }
1686 
1687   while (!MoveRegs.empty()) {
1688     Register Reg = MoveRegs.pop_back_val();
1689     MRI->setRegClass(Reg, TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg)));
1690   }
1691 
1692   LLVM_DEBUG(dbgs() << "Folded " << MI);
1693 
1694   return true;
1695 }
1696 
1697 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
1698   if (skipFunction(MF.getFunction()))
1699     return false;
1700 
1701   MRI = &MF.getRegInfo();
1702   ST = &MF.getSubtarget<GCNSubtarget>();
1703   TII = ST->getInstrInfo();
1704   TRI = &TII->getRegisterInfo();
1705   MFI = MF.getInfo<SIMachineFunctionInfo>();
1706 
1707   // omod is ignored by hardware if IEEE bit is enabled. omod also does not
1708   // correctly handle signed zeros.
1709   //
1710   // FIXME: Also need to check strictfp
1711   bool IsIEEEMode = MFI->getMode().IEEE;
1712   bool HasNSZ = MFI->hasNoSignedZerosFPMath();
1713 
1714   bool Changed = false;
1715   for (MachineBasicBlock *MBB : depth_first(&MF)) {
1716     MachineOperand *CurrentKnownM0Val = nullptr;
1717     for (auto &MI : make_early_inc_range(*MBB)) {
1718       Changed |= tryFoldCndMask(MI);
1719 
1720       if (tryFoldZeroHighBits(MI)) {
1721         Changed = true;
1722         continue;
1723       }
1724 
1725       if (MI.isRegSequence() && tryFoldRegSequence(MI)) {
1726         Changed = true;
1727         continue;
1728       }
1729 
1730       if (MI.isPHI() && tryFoldLCSSAPhi(MI)) {
1731         Changed = true;
1732         continue;
1733       }
1734 
1735       if (MI.mayLoad() && tryFoldLoad(MI)) {
1736         Changed = true;
1737         continue;
1738       }
1739 
1740       if (!TII->isFoldableCopy(MI)) {
1741         // Saw an unknown clobber of m0, so we no longer know what it is.
1742         if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
1743           CurrentKnownM0Val = nullptr;
1744 
1745         // TODO: Omod might be OK if there is NSZ only on the source
1746         // instruction, and not the omod multiply.
1747         if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
1748             !tryFoldOMod(MI))
1749           Changed |= tryFoldClamp(MI);
1750 
1751         continue;
1752       }
1753 
1754       // Specially track simple redefs of m0 to the same value in a block, so we
1755       // can erase the later ones.
1756       if (MI.getOperand(0).getReg() == AMDGPU::M0) {
1757         MachineOperand &NewM0Val = MI.getOperand(1);
1758         if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
1759           MI.eraseFromParent();
1760           Changed = true;
1761           continue;
1762         }
1763 
1764         // We aren't tracking other physical registers
1765         CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
1766           nullptr : &NewM0Val;
1767         continue;
1768       }
1769 
1770       MachineOperand &OpToFold = MI.getOperand(1);
1771       bool FoldingImm =
1772           OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
1773 
1774       // FIXME: We could also be folding things like TargetIndexes.
1775       if (!FoldingImm && !OpToFold.isReg())
1776         continue;
1777 
1778       if (OpToFold.isReg() && !OpToFold.getReg().isVirtual())
1779         continue;
1780 
1781       // Prevent folding operands backwards in the function. For example,
1782       // the COPY opcode must not be replaced by 1 in this example:
1783       //
1784       //    %3 = COPY %vgpr0; VGPR_32:%3
1785       //    ...
1786       //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
1787       if (!MI.getOperand(0).getReg().isVirtual())
1788         continue;
1789 
1790       Changed |= foldInstOperand(MI, OpToFold);
1791 
1792       // If we managed to fold all uses of this copy then we might as well
1793       // delete it now.
1794       // The only reason we need to follow chains of copies here is that
1795       // tryFoldRegSequence looks forward through copies before folding a
1796       // REG_SEQUENCE into its eventual users.
1797       auto *InstToErase = &MI;
1798       while (MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg())) {
1799         auto &SrcOp = InstToErase->getOperand(1);
1800         auto SrcReg = SrcOp.isReg() ? SrcOp.getReg() : Register();
1801         InstToErase->eraseFromParent();
1802         Changed = true;
1803         InstToErase = nullptr;
1804         if (!SrcReg || SrcReg.isPhysical())
1805           break;
1806         InstToErase = MRI->getVRegDef(SrcReg);
1807         if (!InstToErase || !TII->isFoldableCopy(*InstToErase))
1808           break;
1809       }
1810       if (InstToErase && InstToErase->isRegSequence() &&
1811           MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg())) {
1812         InstToErase->eraseFromParent();
1813         Changed = true;
1814       }
1815     }
1816   }
1817   return Changed;
1818 }
1819