//===-- SIFoldOperands.cpp - Fold operands --- ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineOperand.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

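// A deferred fold: remembers which operand of UseMI (UseOpNo) should be
// rewritten and what to rewrite it with (an immediate, a frame index, or a
// register/global operand, discriminated by Kind).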
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool needsShrink() const { return ShrinkOpcode != -1; }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  bool frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
                         const MachineOperand &OpToFold) const;

  bool updateOperand(FoldCandidate &Fold) const;

  bool canUseImmWithOpSel(FoldCandidate &Fold) const;

  bool tryFoldImmWithOpSel(FoldCandidate &Fold) const;

  bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                        MachineInstr *MI, unsigned OpNo,
                        MachineOperand *OpToFold) const;
  bool isUseSafeToFold(const MachineInstr &MI,
                       const MachineOperand &UseMO) const;
  bool
  getRegSeqInit(SmallVectorImpl<std::pair<MachineOperand *, unsigned>> &Defs,
                Register UseReg, uint8_t OpTy) const;
  bool tryToFoldACImm(const MachineOperand &OpToFold, MachineInstr *UseMI,
                      unsigned UseOpIdx,
                      SmallVectorImpl<FoldCandidate> &FoldList) const;
  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  MachineOperand *getImmOrMaterializedImm(MachineOperand &Op) const;
  bool tryConstantFoldOp(MachineInstr *MI) const;
  bool tryFoldCndMask(MachineInstr &MI) const;
  bool tryFoldZeroHighBits(MachineInstr &MI) const;
  bool foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
  bool tryFoldFoldableCopy(MachineInstr &MI,
                           MachineOperand *&CurrentKnownM0Val) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);
  bool tryFoldRegSequence(MachineInstr &MI);
  bool tryFoldPhiAGPR(MachineInstr &MI);
  bool tryFoldLoad(MachineInstr &MI);

  bool tryOptimizeAGPRPhis(MachineBasicBlock &MBB);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

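// Return the register class of \p MO's register, narrowed to the class of its
// subregister when one is present.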
static const TargetRegisterClass *getRegOpRC(const MachineRegisterInfo &MRI,
                                             const TargetRegisterInfo &TRI,
                                             const MachineOperand &MO) {
  const TargetRegisterClass *RC = MRI.getRegClass(MO.getReg());
  if (const TargetRegisterClass *SubRC =
          TRI.getSubRegisterClass(RC, MO.getSubReg()))
    RC = SubRC;
  return RC;
}

// Map multiply-accumulate opcode to corresponding multiply-add opcode if any.
static unsigned macToMad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
    return AMDGPU::V_MAD_F32_e64;
  case AMDGPU::V_MAC_F16_e64:
    return AMDGPU::V_MAD_F16_e64;
  case AMDGPU::V_FMAC_F32_e64:
    return AMDGPU::V_FMA_F32_e64;
  case AMDGPU::V_FMAC_F16_e64:
    return AMDGPU::V_FMA_F16_gfx9_e64;
  case AMDGPU::V_FMAC_F16_t16_e64:
    return AMDGPU::V_FMA_F16_gfx9_e64;
  case AMDGPU::V_FMAC_LEGACY_F32_e64:
    return AMDGPU::V_FMA_LEGACY_F32_e64;
  case AMDGPU::V_FMAC_F64_e64:
    return AMDGPU::V_FMA_F64_e64;
  }
  return AMDGPU::INSTRUCTION_LIST_END;
}

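// Check whether a frame index may fold into operand \p OpNo of \p UseMI: the
// vaddr of a MUBUF access, or the saddr (or lone vaddr) of a flat scratch
// access.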
// TODO: Add heuristic that the frame index might not fit in the addressing mode
// immediate offset to avoid materializing in loops.
bool SIFoldOperands::frameIndexMayFold(const MachineInstr &UseMI, int OpNo,
                                       const MachineOperand &OpToFold) const {
  if (!OpToFold.isFI())
    return false;

  const unsigned Opc = UseMI.getOpcode();
  if (TII->isMUBUF(UseMI))
    return OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
  if (!TII->isFLATScratch(UseMI))
    return false;

  int SIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
  if (OpNo == SIdx)
    return true;

  int VIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
  return OpNo == VIdx && SIdx == -1;
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

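// Check whether an immediate fold candidate targets an operand of a packed
// (V2FP16/V2INT16) instruction, where the op_sel modifiers give extra freedom
// to form an inline constant. Packed MAI/WMMA/SWMMAC instructions (and DOT
// instructions on subtargets with the DOT op_sel hazard) are excluded.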
bool SIFoldOperands::canUseImmWithOpSel(FoldCandidate &Fold) const {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  const uint64_t TSFlags = MI->getDesc().TSFlags;

  assert(Old.isReg() && Fold.isImm());

  if (!(TSFlags & SIInstrFlags::IsPacked) || (TSFlags & SIInstrFlags::IsMAI) ||
      (TSFlags & SIInstrFlags::IsWMMA) || (TSFlags & SIInstrFlags::IsSWMMAC) ||
      (ST->hasDOTOpSelHazard() && (TSFlags & SIInstrFlags::IsDOT)))
    return false;

  unsigned Opcode = MI->getOpcode();
  int OpNo = MI->getOperandNo(&Old);
  uint8_t OpType = TII->get(Opcode).operands()[OpNo].OperandType;
  switch (OpType) {
  default:
    return false;
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    break;
  }

  return true;
}

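// Try to fold a 32-bit packed immediate into an operand of a packed
// instruction by adjusting op_sel/op_sel_hi so the encoded value becomes an
// inline constant. An illustrative sketch (assuming f16 semantics, where 1.0
// is 0x3C00): if the literal 0x3C003C00, i.e. <1.0, 1.0>, is not accepted
// as-is, its halves are equal, so the Lo == Hi path below can fold it to the
// inline constant 0x3C00 with op_sel/op_sel_hi cleared so that both halves of
// the operation read the low half of the source.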
bool SIFoldOperands::tryFoldImmWithOpSel(FoldCandidate &Fold) const {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  unsigned Opcode = MI->getOpcode();
  int OpNo = MI->getOperandNo(&Old);
  uint8_t OpType = TII->get(Opcode).operands()[OpNo].OperandType;

  // If the literal can be inlined as-is, apply it and short-circuit the
  // tests below. The main motivation for this is to avoid unintuitive
  // uses of opsel.
  if (AMDGPU::isInlinableLiteralV216(Fold.ImmToFold, OpType)) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  // Refer to op_sel/op_sel_hi and check if we can change the immediate and
  // op_sel in a way that allows an inline constant.
  int ModIdx = -1;
  unsigned SrcIdx = ~0;
  if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0)) {
    ModIdx = AMDGPU::OpName::src0_modifiers;
    SrcIdx = 0;
  } else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1)) {
    ModIdx = AMDGPU::OpName::src1_modifiers;
    SrcIdx = 1;
  } else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2)) {
    ModIdx = AMDGPU::OpName::src2_modifiers;
    SrcIdx = 2;
  }
  assert(ModIdx != -1);
  ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
  MachineOperand &Mod = MI->getOperand(ModIdx);
  unsigned ModVal = Mod.getImm();

  uint16_t ImmLo = static_cast<uint16_t>(
      Fold.ImmToFold >> (ModVal & SISrcMods::OP_SEL_0 ? 16 : 0));
  uint16_t ImmHi = static_cast<uint16_t>(
      Fold.ImmToFold >> (ModVal & SISrcMods::OP_SEL_1 ? 16 : 0));
  uint32_t Imm = (static_cast<uint32_t>(ImmHi) << 16) | ImmLo;
  unsigned NewModVal = ModVal & ~(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1);

  // Helper function that attempts to inline the given value with a newly
  // chosen opsel pattern.
  auto tryFoldToInline = [&](uint32_t Imm) -> bool {
    if (AMDGPU::isInlinableLiteralV216(Imm, OpType)) {
      Mod.setImm(NewModVal | SISrcMods::OP_SEL_1);
      Old.ChangeToImmediate(Imm);
      return true;
    }

    // Try to shuffle the halves around and leverage opsel to get an inline
    // constant.
    uint16_t Lo = static_cast<uint16_t>(Imm);
    uint16_t Hi = static_cast<uint16_t>(Imm >> 16);
    if (Lo == Hi) {
      if (AMDGPU::isInlinableLiteralV216(Lo, OpType)) {
        Mod.setImm(NewModVal);
        Old.ChangeToImmediate(Lo);
        return true;
      }

      if (static_cast<int16_t>(Lo) < 0) {
        int32_t SExt = static_cast<int16_t>(Lo);
        if (AMDGPU::isInlinableLiteralV216(SExt, OpType)) {
          Mod.setImm(NewModVal);
          Old.ChangeToImmediate(SExt);
          return true;
        }
      }

      // This check is only useful for integer instructions.
      if (OpType == AMDGPU::OPERAND_REG_IMM_V2INT16 ||
          OpType == AMDGPU::OPERAND_REG_INLINE_AC_V2INT16) {
        if (AMDGPU::isInlinableLiteralV216(Lo << 16, OpType)) {
          Mod.setImm(NewModVal | SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(static_cast<uint32_t>(Lo) << 16);
          return true;
        }
      }
    } else {
      uint32_t Swapped = (static_cast<uint32_t>(Lo) << 16) | Hi;
      if (AMDGPU::isInlinableLiteralV216(Swapped, OpType)) {
        Mod.setImm(NewModVal | SISrcMods::OP_SEL_0);
        Old.ChangeToImmediate(Swapped);
        return true;
      }
    }

    return false;
  };

  if (tryFoldToInline(Imm))
    return true;

  // Replace integer addition by subtraction and vice versa if it allows
  // folding the immediate to an inline constant.
  //
  // We should only ever get here for SrcIdx == 1 due to canonicalization
  // earlier in the pipeline, but we double-check here to be safe / fully
  // general.
  bool IsUAdd = Opcode == AMDGPU::V_PK_ADD_U16;
  bool IsUSub = Opcode == AMDGPU::V_PK_SUB_U16;
  if (SrcIdx == 1 && (IsUAdd || IsUSub)) {
    unsigned ClampIdx =
        AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::clamp);
    bool Clamp = MI->getOperand(ClampIdx).getImm() != 0;

    if (!Clamp) {
      uint16_t NegLo = -static_cast<uint16_t>(Imm);
      uint16_t NegHi = -static_cast<uint16_t>(Imm >> 16);
      uint32_t NegImm = (static_cast<uint32_t>(NegHi) << 16) | NegLo;

      if (tryFoldToInline(NegImm)) {
        unsigned NegOpcode =
            IsUAdd ? AMDGPU::V_PK_SUB_U16 : AMDGPU::V_PK_ADD_U16;
        MI->setDesc(TII->get(NegOpcode));
        return true;
      }
    }
  }

  return false;
}

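// Apply a queued fold: rewrite the use operand in place. This handles the
// packed-immediate/op_sel path, shrinking a VOP3 to its 32-bit encoding when
// the fold requires it, switching a tied MFMA to its early-clobber form so an
// immediate can replace the tied operand, and plain imm/FI/global/register
// replacement. Returns true if the operand was updated.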
bool SIFoldOperands::updateOperand(FoldCandidate &Fold) const {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm() && canUseImmWithOpSel(Fold)) {
    if (tryFoldImmWithOpSel(Fold))
      return true;

    // We can't represent the candidate as an inline constant. Try as a literal
    // with the original opsel, checking constant bus limitations.
    MachineOperand New = MachineOperand::CreateImm(Fold.ImmToFold);
    int OpNo = MI->getOperandNo(&Old);
    if (!TII->isOperandLegal(*MI, OpNo, &New))
      return false;
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(TRI, AMDGPU::VCC, MI, 16);
    if (Liveness != MachineBasicBlock::LQR_Dead) {
      LLVM_DEBUG(dbgs() << "Not shrinking " << MI << " due to vcc liveness\n");
      return false;
    }

    int Op32 = Fold.ShrinkOpcode;
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI->use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI->getRegClass(Dst0.getReg());
    Register NewReg0 = MRI->createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII->buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(AMDGPU::COPY),
              Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->removeOperand(I);
    MI->setDesc(TII->get(AMDGPU::IMPLICIT_DEF));

    if (Fold.Commuted)
      TII->commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    if (Old.isTied()) {
      int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(MI->getOpcode());
      if (NewMFMAOpc == -1)
        return false;
      MI->setDesc(TII->get(NewMFMAOpc));
      MI->untieRegOperand(0);
    }
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), *TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  return any_of(FoldList, [&](const auto &C) { return C.UseMI == MI; });
}

static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
                                MachineInstr *MI, unsigned OpNo,
                                MachineOperand *FoldOp, bool Commuted = false,
                                int ShrinkOp = -1) {
  // Skip additional folding on the same operand.
  for (FoldCandidate &Fold : FoldList)
    if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
      return;
  LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
                    << " operand " << OpNo << "\n  " << *MI);
  FoldList.emplace_back(MI, OpNo, FoldOp, Commuted, ShrinkOp);
}

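// Try to append a fold of OpToFold into operand OpNo of MI. If the operand is
// not legal as-is, attempt progressively more invasive rewrites: convert a
// mac/fmac to its mad/fma form, turn s_fmac_f32 into s_fmaak/s_fmamk to untie
// src2, switch s_setreg_b32 to its immediate form, or commute the instruction.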
bool SIFoldOperands::tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                                      MachineInstr *MI, unsigned OpNo,
                                      MachineOperand *OpToFold) const {
  const unsigned Opc = MI->getOpcode();

  auto tryToFoldAsFMAAKorMK = [&]() {
    if (!OpToFold->isImm())
      return false;

    const bool TryAK = OpNo == 3;
    const unsigned NewOpc = TryAK ? AMDGPU::S_FMAAK_F32 : AMDGPU::S_FMAMK_F32;
    MI->setDesc(TII->get(NewOpc));

    // We have to fold into the operand that will hold the immediate, which is
    // not necessarily OpNo.
    bool FoldAsFMAAKorMK =
        tryAddToFoldList(FoldList, MI, TryAK ? 3 : 2, OpToFold);
    if (FoldAsFMAAKorMK) {
      // Untie Src2 of fmac.
      MI->untieRegOperand(3);
      // For fmamk, swap operands 1 and 2 if OpToFold was meant for operand 1.
      if (OpNo == 1) {
        MachineOperand &Op1 = MI->getOperand(1);
        MachineOperand &Op2 = MI->getOperand(2);
        Register OldReg = Op1.getReg();
        // Operand 2 might be an inlinable constant.
        if (Op2.isImm()) {
          Op1.ChangeToImmediate(Op2.getImm());
          Op2.ChangeToRegister(OldReg, false);
        } else {
          Op1.setReg(Op2.getReg());
          Op2.setReg(OldReg);
        }
      }
      return true;
    }
    MI->setDesc(TII->get(Opc));
    return false;
  };

  bool IsLegal = TII->isOperandLegal(*MI, OpNo, OpToFold);
  if (!IsLegal && OpToFold->isImm()) {
    FoldCandidate Fold(MI, OpNo, OpToFold);
    IsLegal = canUseImmWithOpSel(Fold);
  }

  if (!IsLegal) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned NewOpc = macToMad(Opc);
    if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) {
      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool AddOpSel = !AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel) &&
                      AMDGPU::hasNamedOperand(NewOpc, AMDGPU::OpName::op_sel);
      if (AddOpSel)
        MI->addOperand(MachineOperand::CreateImm(0));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      if (AddOpSel)
        MI->removeOperand(MI->getNumExplicitOperands() - 1);
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_fmac_f32 if we are trying to fold into Src2.
    // By transforming into fmaak we can untie Src2 and make folding legal.
    if (Opc == AMDGPU::S_FMAC_F32 && OpNo == 3) {
      if (tryToFoldAsFMAAKorMK())
        return true;
    }

    // Special case for s_setreg_b32
    if (OpToFold->isImm()) {
      unsigned ImmOpc = 0;
      if (Opc == AMDGPU::S_SETREG_B32)
        ImmOpc = AMDGPU::S_SETREG_IMM32_B32;
      else if (Opc == AMDGPU::S_SETREG_B32_mode)
        ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode;
      if (ImmOpc) {
        MI->setDesc(TII->get(ImmOpc));
        appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
        return true;
      }
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteOpNo = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, OpNo, CommuteOpNo);
    if (!CanCommute)
      return false;

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (!MI->getOperand(OpNo).isReg() || !MI->getOperand(CommuteOpNo).isReg())
      return false;

    if (!TII->commuteInstruction(*MI, false, OpNo, CommuteOpNo))
      return false;

    int Op32 = -1;
    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc != AMDGPU::V_ADD_CO_U32_e64 && Opc != AMDGPU::V_SUB_CO_U32_e64 &&
           Opc != AMDGPU::V_SUBREV_CO_U32_e64) || // FIXME
          (!OpToFold->isImm() && !OpToFold->isFI() && !OpToFold->isGlobal())) {
        TII->commuteInstruction(*MI, false, OpNo, CommuteOpNo);
        return false;
      }

      // Verify the other operand is a VGPR, otherwise we would violate the
      // constant bus restriction.
      MachineOperand &OtherOp = MI->getOperand(OpNo);
      if (!OtherOp.isReg() ||
          !TII->getRegisterInfo().isVGPR(*MRI, OtherOp.getReg()))
        return false;

      assert(MI->getOperand(1).isDef());

      // Make sure to get the 32-bit version of the commuted opcode.
      unsigned MaybeCommutedOpc = MI->getOpcode();
      Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);
    }

    appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
    return true;
  }

  // An inlinable constant might have been folded into the Imm operand of fmaak
  // or fmamk, and we are trying to fold a non-inlinable constant.
  if ((Opc == AMDGPU::S_FMAAK_F32 || Opc == AMDGPU::S_FMAMK_F32) &&
      !OpToFold->isReg() && !TII->isInlineConstant(*OpToFold)) {
    unsigned ImmIdx = Opc == AMDGPU::S_FMAAK_F32 ? 3 : 2;
    MachineOperand &OpImm = MI->getOperand(ImmIdx);
    if (!OpImm.isReg() &&
        TII->isInlineConstant(*MI, MI->getOperand(OpNo), OpImm))
      return tryToFoldAsFMAAKorMK();
  }

  // Special case for s_fmac_f32 if we are trying to fold into Src0 or Src1.
  // By changing into fmamk we can untie Src2.
  // If folding for Src0 happens first and it is identical to Src1, we should
  // avoid transforming into fmamk, which requires commuting, as it would cause
  // folding into Src1 to fail later on due to the wrong OpNo being used.
  if (Opc == AMDGPU::S_FMAC_F32 &&
      (OpNo != 1 || !MI->getOperand(1).isIdenticalTo(MI->getOperand(2)))) {
    if (tryToFoldAsFMAAKorMK())
      return true;
  }

  // Check the case where we might introduce a second constant operand to a
  // scalar instruction
  if (TII->isSALU(MI->getOpcode())) {
    const MCInstrDesc &InstDesc = MI->getDesc();
    const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];

    // Fine if the operand can be encoded as an inline constant
    if (!OpToFold->isReg() && !TII->isInlineConstant(*OpToFold, OpInfo)) {
      // Otherwise check for another constant
      for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
        auto &Op = MI->getOperand(i);
        if (OpNo != i && !Op.isReg() &&
            !TII->isInlineConstant(Op, InstDesc.operands()[i]))
          return false;
      }
    }
  }

  appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
  return true;
}

bool SIFoldOperands::isUseSafeToFold(const MachineInstr &MI,
                                     const MachineOperand &UseMO) const {
  // Operands of SDWA instructions must be registers.
  return !TII->isSDWA(MI);
}

// Find a def of UseReg and check whether it is a reg_sequence; if so, find the
// initializer for each subreg, tracing each one back to a foldable inline
// immediate where possible. Returns true on success.
bool SIFoldOperands::getRegSeqInit(
    SmallVectorImpl<std::pair<MachineOperand *, unsigned>> &Defs,
    Register UseReg, uint8_t OpTy) const {
  MachineInstr *Def = MRI->getVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return false;

  for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    MachineOperand *Sub = &Def->getOperand(I);
    assert(Sub->isReg());

    for (MachineInstr *SubDef = MRI->getVRegDef(Sub->getReg());
         SubDef && Sub->isReg() && Sub->getReg().isVirtual() &&
         !Sub->getSubReg() && TII->isFoldableCopy(*SubDef);
         SubDef = MRI->getVRegDef(Sub->getReg())) {
      MachineOperand *Op = &SubDef->getOperand(1);
      if (Op->isImm()) {
        if (TII->isInlineConstant(*Op, OpTy))
          Sub = Op;
        break;
      }
      if (!Op->isReg() || Op->getReg().isPhysical())
        break;
      Sub = Op;
    }

    Defs.emplace_back(Sub, Def->getOperand(I + 1).getImm());
  }

  return true;
}

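// Try to fold OpToFold into an inline-constant-only source operand of UseMI:
// either a directly inlinable immediate, a foldable copy of one, or a
// reg_sequence whose initializers are all the same inline constant (a splat).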
bool SIFoldOperands::tryToFoldACImm(
    const MachineOperand &OpToFold, MachineInstr *UseMI, unsigned UseOpIdx,
    SmallVectorImpl<FoldCandidate> &FoldList) const {
  const MCInstrDesc &Desc = UseMI->getDesc();
  if (UseOpIdx >= Desc.getNumOperands())
    return false;

  if (!AMDGPU::isSISrcInlinableOperand(Desc, UseOpIdx))
    return false;

  uint8_t OpTy = Desc.operands()[UseOpIdx].OperandType;
  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
      TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!UseReg.isVirtual())
    return false;

  if (isUseMIInFoldList(FoldList, UseMI))
    return false;

  // Maybe it is just a COPY of an immediate itself.
  MachineInstr *Def = MRI->getVRegDef(UseReg);
  MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
  if (!UseOp.getSubReg() && Def && TII->isFoldableCopy(*Def)) {
    MachineOperand &DefOp = Def->getOperand(1);
    if (DefOp.isImm() && TII->isInlineConstant(DefOp, OpTy) &&
        TII->isOperandLegal(*UseMI, UseOpIdx, &DefOp)) {
      UseMI->getOperand(UseOpIdx).ChangeToImmediate(DefOp.getImm());
      return true;
    }
  }

  SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
  if (!getRegSeqInit(Defs, UseReg, OpTy))
    return false;

  int32_t Imm;
  for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
    const MachineOperand *Op = Defs[I].first;
    if (!Op->isImm())
      return false;

    auto SubImm = Op->getImm();
    if (!I) {
      Imm = SubImm;
      if (!TII->isInlineConstant(*Op, OpTy) ||
          !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
        return false;

      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
  return true;
}

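// Fold OpToFold into the use at UseOpIdx of UseMI, queueing rewrites on
// FoldList. Literals cannot be folded into a REG_SEQUENCE directly, so for
// reg_sequences the fold is recursively forwarded to the users of the
// sequence's result. Copies rewritten into immediate moves here are collected
// in CopiesToReplace so the caller can finalize them afterwards.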
foldOperand(MachineOperand & OpToFold,MachineInstr * UseMI,int UseOpIdx,SmallVectorImpl<FoldCandidate> & FoldList,SmallVectorImpl<MachineInstr * > & CopiesToReplace) const7530b57cec5SDimitry Andric void SIFoldOperands::foldOperand(
7540b57cec5SDimitry Andric   MachineOperand &OpToFold,
7550b57cec5SDimitry Andric   MachineInstr *UseMI,
7560b57cec5SDimitry Andric   int UseOpIdx,
7570b57cec5SDimitry Andric   SmallVectorImpl<FoldCandidate> &FoldList,
7580b57cec5SDimitry Andric   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
7590b57cec5SDimitry Andric   const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
7600b57cec5SDimitry Andric 
761bdd1243dSDimitry Andric   if (!isUseSafeToFold(*UseMI, UseOp))
7620b57cec5SDimitry Andric     return;
7630b57cec5SDimitry Andric 
7640b57cec5SDimitry Andric   // FIXME: Fold operands with subregs.
765bdd1243dSDimitry Andric   if (UseOp.isReg() && OpToFold.isReg() &&
766bdd1243dSDimitry Andric       (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister))
7670b57cec5SDimitry Andric     return;
7680b57cec5SDimitry Andric 
7690b57cec5SDimitry Andric   // Special case for REG_SEQUENCE: We can't fold literals into
7700b57cec5SDimitry Andric   // REG_SEQUENCE instructions, so we have to fold them into the
7710b57cec5SDimitry Andric   // uses of REG_SEQUENCE.
7720b57cec5SDimitry Andric   if (UseMI->isRegSequence()) {
7738bcb0991SDimitry Andric     Register RegSeqDstReg = UseMI->getOperand(0).getReg();
7740b57cec5SDimitry Andric     unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
7750b57cec5SDimitry Andric 
776fe6060f1SDimitry Andric     for (auto &RSUse : make_early_inc_range(MRI->use_nodbg_operands(RegSeqDstReg))) {
777fe6060f1SDimitry Andric       MachineInstr *RSUseMI = RSUse.getParent();
7780b57cec5SDimitry Andric 
779bdd1243dSDimitry Andric       if (tryToFoldACImm(UseMI->getOperand(0), RSUseMI,
780fe6060f1SDimitry Andric                          RSUseMI->getOperandNo(&RSUse), FoldList))
7810b57cec5SDimitry Andric         continue;
7820b57cec5SDimitry Andric 
783fe6060f1SDimitry Andric       if (RSUse.getSubReg() != RegSeqDstSubReg)
7840b57cec5SDimitry Andric         continue;
7850b57cec5SDimitry Andric 
786fe6060f1SDimitry Andric       foldOperand(OpToFold, RSUseMI, RSUseMI->getOperandNo(&RSUse), FoldList,
7870b57cec5SDimitry Andric                   CopiesToReplace);
7880b57cec5SDimitry Andric     }
7890b57cec5SDimitry Andric 
7900b57cec5SDimitry Andric     return;
7910b57cec5SDimitry Andric   }
7920b57cec5SDimitry Andric 
793bdd1243dSDimitry Andric   if (tryToFoldACImm(OpToFold, UseMI, UseOpIdx, FoldList))
7940b57cec5SDimitry Andric     return;
7950b57cec5SDimitry Andric 
796bdd1243dSDimitry Andric   if (frameIndexMayFold(*UseMI, UseOpIdx, OpToFold)) {
797349cc55cSDimitry Andric     // Verify that this is a stack access.
7980b57cec5SDimitry Andric     // FIXME: Should probably use stack pseudos before frame lowering.
7990b57cec5SDimitry Andric 
800e8d8bef9SDimitry Andric     if (TII->isMUBUF(*UseMI)) {
8010b57cec5SDimitry Andric       if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
8020b57cec5SDimitry Andric           MFI->getScratchRSrcReg())
8030b57cec5SDimitry Andric         return;
8040b57cec5SDimitry Andric 
805e8d8bef9SDimitry Andric       // Ensure this is either relative to the current frame or the current
806e8d8bef9SDimitry Andric       // wave.
8075ffd83dbSDimitry Andric       MachineOperand &SOff =
8085ffd83dbSDimitry Andric           *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
809e8d8bef9SDimitry Andric       if (!SOff.isImm() || SOff.getImm() != 0)
8105ffd83dbSDimitry Andric         return;
811e8d8bef9SDimitry Andric     }
8125ffd83dbSDimitry Andric 
8130b57cec5SDimitry Andric     // A frame index will resolve to a positive constant, so it should always be
8140b57cec5SDimitry Andric     // safe to fold the addressing mode, even pre-GFX9.
8150b57cec5SDimitry Andric     UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
8165ffd83dbSDimitry Andric 
817bdd1243dSDimitry Andric     const unsigned Opc = UseMI->getOpcode();
818e8d8bef9SDimitry Andric     if (TII->isFLATScratch(*UseMI) &&
819bdd1243dSDimitry Andric         AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vaddr) &&
820bdd1243dSDimitry Andric         !AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::saddr)) {
821bdd1243dSDimitry Andric       unsigned NewOpc = AMDGPU::getFlatScratchInstSSfromSV(Opc);
822e8d8bef9SDimitry Andric       UseMI->setDesc(TII->get(NewOpc));
823e8d8bef9SDimitry Andric     }
824e8d8bef9SDimitry Andric 
8250b57cec5SDimitry Andric     return;
8260b57cec5SDimitry Andric   }
8270b57cec5SDimitry Andric 
8280b57cec5SDimitry Andric   bool FoldingImmLike =
8290b57cec5SDimitry Andric       OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
8300b57cec5SDimitry Andric 
8310b57cec5SDimitry Andric   if (FoldingImmLike && UseMI->isCopy()) {
8328bcb0991SDimitry Andric     Register DestReg = UseMI->getOperand(0).getReg();
833e8d8bef9SDimitry Andric     Register SrcReg = UseMI->getOperand(1).getReg();
834e8d8bef9SDimitry Andric     assert(SrcReg.isVirtual());
8350b57cec5SDimitry Andric 
836e8d8bef9SDimitry Andric     const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
837e8d8bef9SDimitry Andric 
838e8d8bef9SDimitry Andric     // Don't fold into a copy to a physical register with the same class. Doing
839e8d8bef9SDimitry Andric     // so would interfere with the register coalescer's logic which would avoid
840349cc55cSDimitry Andric     // redundant initializations.
841e8d8bef9SDimitry Andric     if (DestReg.isPhysical() && SrcRC->contains(DestReg))
8428bcb0991SDimitry Andric       return;
8438bcb0991SDimitry Andric 
844e8d8bef9SDimitry Andric     const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
845e8d8bef9SDimitry Andric     if (!DestReg.isPhysical()) {
8460b57cec5SDimitry Andric       if (DestRC == &AMDGPU::AGPR_32RegClass &&
8470b57cec5SDimitry Andric           TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
848e8d8bef9SDimitry Andric         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
8490b57cec5SDimitry Andric         UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
8500b57cec5SDimitry Andric         CopiesToReplace.push_back(UseMI);
8510b57cec5SDimitry Andric         return;
8520b57cec5SDimitry Andric       }
853e8d8bef9SDimitry Andric     }
8540b57cec5SDimitry Andric 
8550b57cec5SDimitry Andric     // In order to fold immediates into copies, we need to change the
8560b57cec5SDimitry Andric     // copy to a MOV.
8570b57cec5SDimitry Andric 
8580b57cec5SDimitry Andric     unsigned MovOp = TII->getMovOpcode(DestRC);
8590b57cec5SDimitry Andric     if (MovOp == AMDGPU::COPY)
8600b57cec5SDimitry Andric       return;
8610b57cec5SDimitry Andric 
8620b57cec5SDimitry Andric     UseMI->setDesc(TII->get(MovOp));
8638bcb0991SDimitry Andric     MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
8648bcb0991SDimitry Andric     MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
8658bcb0991SDimitry Andric     while (ImpOpI != ImpOpE) {
8668bcb0991SDimitry Andric       MachineInstr::mop_iterator Tmp = ImpOpI;
8678bcb0991SDimitry Andric       ImpOpI++;
86881ad6265SDimitry Andric       UseMI->removeOperand(UseMI->getOperandNo(Tmp));
8698bcb0991SDimitry Andric     }
8700b57cec5SDimitry Andric     CopiesToReplace.push_back(UseMI);
8710b57cec5SDimitry Andric   } else {
8720b57cec5SDimitry Andric     if (UseMI->isCopy() && OpToFold.isReg() &&
873480093f4SDimitry Andric         UseMI->getOperand(0).getReg().isVirtual() &&
8740b57cec5SDimitry Andric         !UseMI->getOperand(1).getSubReg()) {
875fe6060f1SDimitry Andric       LLVM_DEBUG(dbgs() << "Folding " << OpToFold << "\n into " << *UseMI);
8760b57cec5SDimitry Andric       unsigned Size = TII->getOpSize(*UseMI, 1);
877480093f4SDimitry Andric       Register UseReg = OpToFold.getReg();
878480093f4SDimitry Andric       UseMI->getOperand(1).setReg(UseReg);
8790b57cec5SDimitry Andric       UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
8800b57cec5SDimitry Andric       UseMI->getOperand(1).setIsKill(false);
8810b57cec5SDimitry Andric       CopiesToReplace.push_back(UseMI);
8820b57cec5SDimitry Andric       OpToFold.setIsKill(false);
883480093f4SDimitry Andric 
884bdd1243dSDimitry Andric       // Remove kill flags as kills may now be out of order with uses.
885bdd1243dSDimitry Andric       MRI->clearKillFlags(OpToFold.getReg());
886bdd1243dSDimitry Andric 
887480093f4SDimitry Andric       // That is very tricky to store a value into an AGPR. v_accvgpr_write_b32
888480093f4SDimitry Andric       // can only accept VGPR or inline immediate. Recreate a reg_sequence with
889480093f4SDimitry Andric       // its initializers right here, so we will rematerialize immediates and
890480093f4SDimitry Andric       // avoid copies via different reg classes.
891480093f4SDimitry Andric       SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
892480093f4SDimitry Andric       if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
893bdd1243dSDimitry Andric           getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
894480093f4SDimitry Andric         const DebugLoc &DL = UseMI->getDebugLoc();
895480093f4SDimitry Andric         MachineBasicBlock &MBB = *UseMI->getParent();
896480093f4SDimitry Andric 
897480093f4SDimitry Andric         UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
898480093f4SDimitry Andric         for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
89981ad6265SDimitry Andric           UseMI->removeOperand(I);
900480093f4SDimitry Andric 
901480093f4SDimitry Andric         MachineInstrBuilder B(*MBB.getParent(), UseMI);
902480093f4SDimitry Andric         DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
903480093f4SDimitry Andric         SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
904480093f4SDimitry Andric         for (unsigned I = 0; I < Size / 4; ++I) {
905480093f4SDimitry Andric           MachineOperand *Def = Defs[I].first;
906480093f4SDimitry Andric           TargetInstrInfo::RegSubRegPair CopyToVGPR;
907480093f4SDimitry Andric           if (Def->isImm() &&
908480093f4SDimitry Andric               TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
909480093f4SDimitry Andric             int64_t Imm = Def->getImm();
910480093f4SDimitry Andric 
911480093f4SDimitry Andric             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
912480093f4SDimitry Andric             BuildMI(MBB, UseMI, DL,
913e8d8bef9SDimitry Andric                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addImm(Imm);
914480093f4SDimitry Andric             B.addReg(Tmp);
915480093f4SDimitry Andric           } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
916480093f4SDimitry Andric             auto Src = getRegSubRegPair(*Def);
917480093f4SDimitry Andric             Def->setIsKill(false);
918480093f4SDimitry Andric             if (!SeenAGPRs.insert(Src)) {
919480093f4SDimitry Andric               // We cannot build a reg_sequence out of the same registers; they
920480093f4SDimitry Andric               // must be copied. Better to copy them here, before copyPhysReg()
921480093f4SDimitry Andric               // expands each one into an AGPR->VGPR->AGPR move sequence.
922480093f4SDimitry Andric               CopyToVGPR = Src;
923480093f4SDimitry Andric             } else {
924480093f4SDimitry Andric               B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
925480093f4SDimitry Andric                        Src.SubReg);
926480093f4SDimitry Andric             }
927480093f4SDimitry Andric           } else {
928480093f4SDimitry Andric             assert(Def->isReg());
929480093f4SDimitry Andric             Def->setIsKill(false);
930480093f4SDimitry Andric             auto Src = getRegSubRegPair(*Def);
931480093f4SDimitry Andric 
932480093f4SDimitry Andric             // A direct copy from an SGPR to an AGPR is not possible. To avoid
933480093f4SDimitry Andric             // copyPhysReg() later exploding this into SGPR->VGPR->AGPR copies,
934480093f4SDimitry Andric             // create the copy here and track whether we already have one.
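            // A sketch, assuming %s is such an SGPR initializer (illustrative):
            //   %v:vgpr_32 = COPY %s
            //   %a:agpr_32 = V_ACCVGPR_WRITE_B32_e64 %v, implicit $exec
            // The intermediate VGPR is cached in VGPRCopies and reused if %s
            // appears again.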
935480093f4SDimitry Andric             if (TRI->isSGPRReg(*MRI, Src.Reg)) {
936480093f4SDimitry Andric               CopyToVGPR = Src;
937480093f4SDimitry Andric             } else {
938480093f4SDimitry Andric               auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
939480093f4SDimitry Andric               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
940480093f4SDimitry Andric               B.addReg(Tmp);
941480093f4SDimitry Andric             }
942480093f4SDimitry Andric           }
943480093f4SDimitry Andric 
944480093f4SDimitry Andric           if (CopyToVGPR.Reg) {
945480093f4SDimitry Andric             Register Vgpr;
946480093f4SDimitry Andric             if (VGPRCopies.count(CopyToVGPR)) {
947480093f4SDimitry Andric               Vgpr = VGPRCopies[CopyToVGPR];
948480093f4SDimitry Andric             } else {
949480093f4SDimitry Andric               Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
950480093f4SDimitry Andric               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
951480093f4SDimitry Andric               VGPRCopies[CopyToVGPR] = Vgpr;
952480093f4SDimitry Andric             }
953480093f4SDimitry Andric             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
954480093f4SDimitry Andric             BuildMI(MBB, UseMI, DL,
955e8d8bef9SDimitry Andric                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addReg(Vgpr);
956480093f4SDimitry Andric             B.addReg(Tmp);
957480093f4SDimitry Andric           }
958480093f4SDimitry Andric 
959480093f4SDimitry Andric           B.addImm(Defs[I].second);
960480093f4SDimitry Andric         }
961fe6060f1SDimitry Andric         LLVM_DEBUG(dbgs() << "Folded " << *UseMI);
962480093f4SDimitry Andric         return;
963480093f4SDimitry Andric       }
964480093f4SDimitry Andric 
9650b57cec5SDimitry Andric       if (Size != 4)
9660b57cec5SDimitry Andric         return;
967bdd1243dSDimitry Andric 
968bdd1243dSDimitry Andric       Register Reg0 = UseMI->getOperand(0).getReg();
969bdd1243dSDimitry Andric       Register Reg1 = UseMI->getOperand(1).getReg();
970bdd1243dSDimitry Andric       if (TRI->isAGPR(*MRI, Reg0) && TRI->isVGPR(*MRI, Reg1))
971e8d8bef9SDimitry Andric         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
972bdd1243dSDimitry Andric       else if (TRI->isVGPR(*MRI, Reg0) && TRI->isAGPR(*MRI, Reg1))
973e8d8bef9SDimitry Andric         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64));
974bdd1243dSDimitry Andric       else if (ST->hasGFX90AInsts() && TRI->isAGPR(*MRI, Reg0) &&
975bdd1243dSDimitry Andric                TRI->isAGPR(*MRI, Reg1))
976fe6060f1SDimitry Andric         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_MOV_B32));
9770b57cec5SDimitry Andric       return;
9780b57cec5SDimitry Andric     }
9790b57cec5SDimitry Andric 
9800b57cec5SDimitry Andric     unsigned UseOpc = UseMI->getOpcode();
9810b57cec5SDimitry Andric     if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
9820b57cec5SDimitry Andric         (UseOpc == AMDGPU::V_READLANE_B32 &&
9830b57cec5SDimitry Andric          (int)UseOpIdx ==
9840b57cec5SDimitry Andric          AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
9850b57cec5SDimitry Andric       // %vgpr = V_MOV_B32 imm
9860b57cec5SDimitry Andric       // %sgpr = V_READFIRSTLANE_B32 %vgpr
9870b57cec5SDimitry Andric       // =>
9880b57cec5SDimitry Andric       // %sgpr = S_MOV_B32 imm
9890b57cec5SDimitry Andric       if (FoldingImmLike) {
9900b57cec5SDimitry Andric         if (execMayBeModifiedBeforeUse(*MRI,
9910b57cec5SDimitry Andric                                        UseMI->getOperand(UseOpIdx).getReg(),
9920b57cec5SDimitry Andric                                        *OpToFold.getParent(),
9930b57cec5SDimitry Andric                                        *UseMI))
9940b57cec5SDimitry Andric           return;
9950b57cec5SDimitry Andric 
9960b57cec5SDimitry Andric         UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
9970b57cec5SDimitry Andric 
9980b57cec5SDimitry Andric         if (OpToFold.isImm())
9990b57cec5SDimitry Andric           UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
10000b57cec5SDimitry Andric         else
10010b57cec5SDimitry Andric           UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
100281ad6265SDimitry Andric         UseMI->removeOperand(2); // Remove exec read (or src1 for readlane)
10030b57cec5SDimitry Andric         return;
10040b57cec5SDimitry Andric       }
10050b57cec5SDimitry Andric 
10060b57cec5SDimitry Andric       if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
10070b57cec5SDimitry Andric         if (execMayBeModifiedBeforeUse(*MRI,
10080b57cec5SDimitry Andric                                        UseMI->getOperand(UseOpIdx).getReg(),
10090b57cec5SDimitry Andric                                        *OpToFold.getParent(),
10100b57cec5SDimitry Andric                                        *UseMI))
10110b57cec5SDimitry Andric           return;
10120b57cec5SDimitry Andric 
10130b57cec5SDimitry Andric         // %vgpr = COPY %sgpr0
10140b57cec5SDimitry Andric         // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
10150b57cec5SDimitry Andric         // =>
10160b57cec5SDimitry Andric         // %sgpr1 = COPY %sgpr0
10170b57cec5SDimitry Andric         UseMI->setDesc(TII->get(AMDGPU::COPY));
10188bcb0991SDimitry Andric         UseMI->getOperand(1).setReg(OpToFold.getReg());
10198bcb0991SDimitry Andric         UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
10208bcb0991SDimitry Andric         UseMI->getOperand(1).setIsKill(false);
102181ad6265SDimitry Andric         UseMI->removeOperand(2); // Remove exec read (or src1 for readlane)
10220b57cec5SDimitry Andric         return;
10230b57cec5SDimitry Andric       }
10240b57cec5SDimitry Andric     }
10250b57cec5SDimitry Andric 
10260b57cec5SDimitry Andric     const MCInstrDesc &UseDesc = UseMI->getDesc();
10270b57cec5SDimitry Andric 
10280b57cec5SDimitry Andric     // Don't fold into target-independent nodes. Target-independent opcodes
10290b57cec5SDimitry Andric     // don't have defined register classes.
1030bdd1243dSDimitry Andric     if (UseDesc.isVariadic() || UseOp.isImplicit() ||
1031bdd1243dSDimitry Andric         UseDesc.operands()[UseOpIdx].RegClass == -1)
10320b57cec5SDimitry Andric       return;
10330b57cec5SDimitry Andric   }
10340b57cec5SDimitry Andric 
10350b57cec5SDimitry Andric   if (!FoldingImmLike) {
103681ad6265SDimitry Andric     if (OpToFold.isReg() && ST->needsAlignedVGPRs()) {
103781ad6265SDimitry Andric       // Don't fold if OpToFold doesn't hold an aligned register.
103881ad6265SDimitry Andric       const TargetRegisterClass *RC =
103981ad6265SDimitry Andric           TRI->getRegClassForReg(*MRI, OpToFold.getReg());
104081ad6265SDimitry Andric       if (TRI->hasVectorRegisters(RC) && OpToFold.getSubReg()) {
104181ad6265SDimitry Andric         unsigned SubReg = OpToFold.getSubReg();
1042bdd1243dSDimitry Andric         if (const TargetRegisterClass *SubRC =
1043bdd1243dSDimitry Andric                 TRI->getSubRegisterClass(RC, SubReg))
104481ad6265SDimitry Andric           RC = SubRC;
104581ad6265SDimitry Andric       }
104681ad6265SDimitry Andric 
104781ad6265SDimitry Andric       if (!RC || !TRI->isProperlyAlignedRC(*RC))
104881ad6265SDimitry Andric         return;
104981ad6265SDimitry Andric     }
105081ad6265SDimitry Andric 
1051bdd1243dSDimitry Andric     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold);
10520b57cec5SDimitry Andric 
10530b57cec5SDimitry Andric     // FIXME: We could try to change the instruction from 64-bit to 32-bit
1054349cc55cSDimitry Andric     // to enable more folding opportunities.  The shrink operands pass
10550b57cec5SDimitry Andric     // already does this.
10560b57cec5SDimitry Andric     return;
10570b57cec5SDimitry Andric   }
10580b57cec5SDimitry Andric 
10590b57cec5SDimitry Andric 
10600b57cec5SDimitry Andric   const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
10610b57cec5SDimitry Andric   const TargetRegisterClass *FoldRC =
1062bdd1243dSDimitry Andric       TRI->getRegClass(FoldDesc.operands()[0].RegClass);
10630b57cec5SDimitry Andric 
10640b57cec5SDimitry Andric   // Split 64-bit constants into 32-bits for folding.
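  // e.g. a use of %0.sub0 receives the low 32 bits of the immediate and a
  // use of %0.sub1 the high 32 bits, where %0 is the 64-bit def being folded.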
106506c3fb27SDimitry Andric   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(*FoldRC) == 64) {
10668bcb0991SDimitry Andric     Register UseReg = UseOp.getReg();
10670b57cec5SDimitry Andric     const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
106806c3fb27SDimitry Andric     if (AMDGPU::getRegBitWidth(*UseRC) != 64)
10690b57cec5SDimitry Andric       return;
10700b57cec5SDimitry Andric 
10710b57cec5SDimitry Andric     APInt Imm(64, OpToFold.getImm());
10720b57cec5SDimitry Andric     if (UseOp.getSubReg() == AMDGPU::sub0) {
10730b57cec5SDimitry Andric       Imm = Imm.getLoBits(32);
10740b57cec5SDimitry Andric     } else {
10750b57cec5SDimitry Andric       assert(UseOp.getSubReg() == AMDGPU::sub1);
10760b57cec5SDimitry Andric       Imm = Imm.getHiBits(32);
10770b57cec5SDimitry Andric     }
10780b57cec5SDimitry Andric 
10790b57cec5SDimitry Andric     MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
1080bdd1243dSDimitry Andric     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp);
10810b57cec5SDimitry Andric     return;
10820b57cec5SDimitry Andric   }
10830b57cec5SDimitry Andric 
1084bdd1243dSDimitry Andric   tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold);
10850b57cec5SDimitry Andric }
10860b57cec5SDimitry Andric 
10870b57cec5SDimitry Andric static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
10880b57cec5SDimitry Andric                                   uint32_t LHS, uint32_t RHS) {
10890b57cec5SDimitry Andric   switch (Opcode) {
10900b57cec5SDimitry Andric   case AMDGPU::V_AND_B32_e64:
10910b57cec5SDimitry Andric   case AMDGPU::V_AND_B32_e32:
10920b57cec5SDimitry Andric   case AMDGPU::S_AND_B32:
10930b57cec5SDimitry Andric     Result = LHS & RHS;
10940b57cec5SDimitry Andric     return true;
10950b57cec5SDimitry Andric   case AMDGPU::V_OR_B32_e64:
10960b57cec5SDimitry Andric   case AMDGPU::V_OR_B32_e32:
10970b57cec5SDimitry Andric   case AMDGPU::S_OR_B32:
10980b57cec5SDimitry Andric     Result = LHS | RHS;
10990b57cec5SDimitry Andric     return true;
11000b57cec5SDimitry Andric   case AMDGPU::V_XOR_B32_e64:
11010b57cec5SDimitry Andric   case AMDGPU::V_XOR_B32_e32:
11020b57cec5SDimitry Andric   case AMDGPU::S_XOR_B32:
11030b57cec5SDimitry Andric     Result = LHS ^ RHS;
11040b57cec5SDimitry Andric     return true;
11055ffd83dbSDimitry Andric   case AMDGPU::S_XNOR_B32:
11065ffd83dbSDimitry Andric     Result = ~(LHS ^ RHS);
11075ffd83dbSDimitry Andric     return true;
11085ffd83dbSDimitry Andric   case AMDGPU::S_NAND_B32:
11095ffd83dbSDimitry Andric     Result = ~(LHS & RHS);
11105ffd83dbSDimitry Andric     return true;
11115ffd83dbSDimitry Andric   case AMDGPU::S_NOR_B32:
11125ffd83dbSDimitry Andric     Result = ~(LHS | RHS);
11135ffd83dbSDimitry Andric     return true;
11145ffd83dbSDimitry Andric   case AMDGPU::S_ANDN2_B32:
11155ffd83dbSDimitry Andric     Result = LHS & ~RHS;
11165ffd83dbSDimitry Andric     return true;
11175ffd83dbSDimitry Andric   case AMDGPU::S_ORN2_B32:
11185ffd83dbSDimitry Andric     Result = LHS | ~RHS;
11195ffd83dbSDimitry Andric     return true;
11200b57cec5SDimitry Andric   case AMDGPU::V_LSHL_B32_e64:
11210b57cec5SDimitry Andric   case AMDGPU::V_LSHL_B32_e32:
11220b57cec5SDimitry Andric   case AMDGPU::S_LSHL_B32:
11230b57cec5SDimitry Andric     // The instruction ignores the high bits for out of bounds shifts.
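    // e.g. a shift amount of 33 acts like a shift by 1, since 33 & 31 == 1.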
11240b57cec5SDimitry Andric     Result = LHS << (RHS & 31);
11250b57cec5SDimitry Andric     return true;
11260b57cec5SDimitry Andric   case AMDGPU::V_LSHLREV_B32_e64:
11270b57cec5SDimitry Andric   case AMDGPU::V_LSHLREV_B32_e32:
11280b57cec5SDimitry Andric     Result = RHS << (LHS & 31);
11290b57cec5SDimitry Andric     return true;
11300b57cec5SDimitry Andric   case AMDGPU::V_LSHR_B32_e64:
11310b57cec5SDimitry Andric   case AMDGPU::V_LSHR_B32_e32:
11320b57cec5SDimitry Andric   case AMDGPU::S_LSHR_B32:
11330b57cec5SDimitry Andric     Result = LHS >> (RHS & 31);
11340b57cec5SDimitry Andric     return true;
11350b57cec5SDimitry Andric   case AMDGPU::V_LSHRREV_B32_e64:
11360b57cec5SDimitry Andric   case AMDGPU::V_LSHRREV_B32_e32:
11370b57cec5SDimitry Andric     Result = RHS >> (LHS & 31);
11380b57cec5SDimitry Andric     return true;
11390b57cec5SDimitry Andric   case AMDGPU::V_ASHR_I32_e64:
11400b57cec5SDimitry Andric   case AMDGPU::V_ASHR_I32_e32:
11410b57cec5SDimitry Andric   case AMDGPU::S_ASHR_I32:
11420b57cec5SDimitry Andric     Result = static_cast<int32_t>(LHS) >> (RHS & 31);
11430b57cec5SDimitry Andric     return true;
11440b57cec5SDimitry Andric   case AMDGPU::V_ASHRREV_I32_e64:
11450b57cec5SDimitry Andric   case AMDGPU::V_ASHRREV_I32_e32:
11460b57cec5SDimitry Andric     Result = static_cast<int32_t>(RHS) >> (LHS & 31);
11470b57cec5SDimitry Andric     return true;
11480b57cec5SDimitry Andric   default:
11490b57cec5SDimitry Andric     return false;
11500b57cec5SDimitry Andric   }
11510b57cec5SDimitry Andric }
11520b57cec5SDimitry Andric 
11530b57cec5SDimitry Andric static unsigned getMovOpc(bool IsScalar) {
11540b57cec5SDimitry Andric   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
11550b57cec5SDimitry Andric }
11560b57cec5SDimitry Andric 
1157bdd1243dSDimitry Andric static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
1158bdd1243dSDimitry Andric   MI.setDesc(NewDesc);
1159bdd1243dSDimitry Andric 
1160bdd1243dSDimitry Andric   // Remove any leftover implicit operands from mutating the instruction. e.g.
1161bdd1243dSDimitry Andric   // if we replace an s_and_b32 with a copy, we don't need the implicit scc def
1162bdd1243dSDimitry Andric   // anymore.
11630b57cec5SDimitry Andric   const MCInstrDesc &Desc = MI.getDesc();
1164bdd1243dSDimitry Andric   unsigned NumOps = Desc.getNumOperands() + Desc.implicit_uses().size() +
1165bdd1243dSDimitry Andric                     Desc.implicit_defs().size();
11660b57cec5SDimitry Andric 
11670b57cec5SDimitry Andric   for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
116881ad6265SDimitry Andric     MI.removeOperand(I);
11690b57cec5SDimitry Andric }
11700b57cec5SDimitry Andric 
1171bdd1243dSDimitry Andric MachineOperand *
1172bdd1243dSDimitry Andric SIFoldOperands::getImmOrMaterializedImm(MachineOperand &Op) const {
11730b57cec5SDimitry Andric   // If this has a subregister, it obviously is a register source.
1174bdd1243dSDimitry Andric   if (!Op.isReg() || Op.getSubReg() != AMDGPU::NoSubRegister ||
1175bdd1243dSDimitry Andric       !Op.getReg().isVirtual())
11760b57cec5SDimitry Andric     return &Op;
11770b57cec5SDimitry Andric 
1178bdd1243dSDimitry Andric   MachineInstr *Def = MRI->getVRegDef(Op.getReg());
11790b57cec5SDimitry Andric   if (Def && Def->isMoveImmediate()) {
11800b57cec5SDimitry Andric     MachineOperand &ImmSrc = Def->getOperand(1);
11810b57cec5SDimitry Andric     if (ImmSrc.isImm())
11820b57cec5SDimitry Andric       return &ImmSrc;
11830b57cec5SDimitry Andric   }
11840b57cec5SDimitry Andric 
11850b57cec5SDimitry Andric   return &Op;
11860b57cec5SDimitry Andric }
11870b57cec5SDimitry Andric 
11880b57cec5SDimitry Andric // Try to simplify operations with a constant that may appear after instruction
11890b57cec5SDimitry Andric // selection.
11900b57cec5SDimitry Andric // TODO: See if a frame index with a fixed offset can fold.
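// e.g. (illustrative MIR): with %0 = V_MOV_B32_e32 6,
//   %1 = V_AND_B32_e32 %0, 3  -->  %1 = V_MOV_B32_e32 2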
1191bdd1243dSDimitry Andric bool SIFoldOperands::tryConstantFoldOp(MachineInstr *MI) const {
11925f757f3fSDimitry Andric   if (!MI->allImplicitDefsAreDead())
11935f757f3fSDimitry Andric     return false;
11945f757f3fSDimitry Andric 
11950b57cec5SDimitry Andric   unsigned Opc = MI->getOpcode();
1196fe6060f1SDimitry Andric 
1197fe6060f1SDimitry Andric   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1198fe6060f1SDimitry Andric   if (Src0Idx == -1)
1199fe6060f1SDimitry Andric     return false;
1200bdd1243dSDimitry Andric   MachineOperand *Src0 = getImmOrMaterializedImm(MI->getOperand(Src0Idx));
1201fe6060f1SDimitry Andric 
1202fe6060f1SDimitry Andric   if ((Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
1203fe6060f1SDimitry Andric        Opc == AMDGPU::S_NOT_B32) &&
1204fe6060f1SDimitry Andric       Src0->isImm()) {
1205fe6060f1SDimitry Andric     MI->getOperand(1).ChangeToImmediate(~Src0->getImm());
12060b57cec5SDimitry Andric     mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
12070b57cec5SDimitry Andric     return true;
12080b57cec5SDimitry Andric   }
12090b57cec5SDimitry Andric 
12100b57cec5SDimitry Andric   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
12110b57cec5SDimitry Andric   if (Src1Idx == -1)
12120b57cec5SDimitry Andric     return false;
1213bdd1243dSDimitry Andric   MachineOperand *Src1 = getImmOrMaterializedImm(MI->getOperand(Src1Idx));
12140b57cec5SDimitry Andric 
12150b57cec5SDimitry Andric   if (!Src0->isImm() && !Src1->isImm())
12160b57cec5SDimitry Andric     return false;
12170b57cec5SDimitry Andric 
12180b57cec5SDimitry Andric   // and k0, k1 -> v_mov_b32 (k0 & k1)
12190b57cec5SDimitry Andric   // or k0, k1 -> v_mov_b32 (k0 | k1)
12200b57cec5SDimitry Andric   // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
12210b57cec5SDimitry Andric   if (Src0->isImm() && Src1->isImm()) {
12220b57cec5SDimitry Andric     int32_t NewImm;
12230b57cec5SDimitry Andric     if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
12240b57cec5SDimitry Andric       return false;
12250b57cec5SDimitry Andric 
1226bdd1243dSDimitry Andric     bool IsSGPR = TRI->isSGPRReg(*MRI, MI->getOperand(0).getReg());
12270b57cec5SDimitry Andric 
12280b57cec5SDimitry Andric     // Be careful to change the right operand; src0 may belong to a different
12290b57cec5SDimitry Andric     // instruction.
12300b57cec5SDimitry Andric     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
123181ad6265SDimitry Andric     MI->removeOperand(Src1Idx);
12320b57cec5SDimitry Andric     mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
12330b57cec5SDimitry Andric     return true;
12340b57cec5SDimitry Andric   }
12350b57cec5SDimitry Andric 
12360b57cec5SDimitry Andric   if (!MI->isCommutable())
12370b57cec5SDimitry Andric     return false;
12380b57cec5SDimitry Andric 
12390b57cec5SDimitry Andric   if (Src0->isImm() && !Src1->isImm()) {
12400b57cec5SDimitry Andric     std::swap(Src0, Src1);
12410b57cec5SDimitry Andric     std::swap(Src0Idx, Src1Idx);
12420b57cec5SDimitry Andric   }
12430b57cec5SDimitry Andric 
12440b57cec5SDimitry Andric   int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
12450b57cec5SDimitry Andric   if (Opc == AMDGPU::V_OR_B32_e64 ||
12460b57cec5SDimitry Andric       Opc == AMDGPU::V_OR_B32_e32 ||
12470b57cec5SDimitry Andric       Opc == AMDGPU::S_OR_B32) {
12480b57cec5SDimitry Andric     if (Src1Val == 0) {
12490b57cec5SDimitry Andric       // y = or x, 0 => y = copy x
125081ad6265SDimitry Andric       MI->removeOperand(Src1Idx);
12510b57cec5SDimitry Andric       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
12520b57cec5SDimitry Andric     } else if (Src1Val == -1) {
12530b57cec5SDimitry Andric       // y = or x, -1 => y = v_mov_b32 -1
125481ad6265SDimitry Andric       MI->removeOperand(Src1Idx);
12550b57cec5SDimitry Andric       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
12560b57cec5SDimitry Andric     } else
12570b57cec5SDimitry Andric       return false;
12580b57cec5SDimitry Andric 
12590b57cec5SDimitry Andric     return true;
12600b57cec5SDimitry Andric   }
12610b57cec5SDimitry Andric 
1262bdd1243dSDimitry Andric   if (Opc == AMDGPU::V_AND_B32_e64 || Opc == AMDGPU::V_AND_B32_e32 ||
1263bdd1243dSDimitry Andric       Opc == AMDGPU::S_AND_B32) {
12640b57cec5SDimitry Andric     if (Src1Val == 0) {
12650b57cec5SDimitry Andric       // y = and x, 0 => y = v_mov_b32 0
126681ad6265SDimitry Andric       MI->removeOperand(Src0Idx);
12670b57cec5SDimitry Andric       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
12680b57cec5SDimitry Andric     } else if (Src1Val == -1) {
12690b57cec5SDimitry Andric       // y = and x, -1 => y = copy x
127081ad6265SDimitry Andric       MI->removeOperand(Src1Idx);
12710b57cec5SDimitry Andric       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
12720b57cec5SDimitry Andric     } else
12730b57cec5SDimitry Andric       return false;
12740b57cec5SDimitry Andric 
12750b57cec5SDimitry Andric     return true;
12760b57cec5SDimitry Andric   }
12770b57cec5SDimitry Andric 
1278bdd1243dSDimitry Andric   if (Opc == AMDGPU::V_XOR_B32_e64 || Opc == AMDGPU::V_XOR_B32_e32 ||
1279bdd1243dSDimitry Andric       Opc == AMDGPU::S_XOR_B32) {
12800b57cec5SDimitry Andric     if (Src1Val == 0) {
12810b57cec5SDimitry Andric       // y = xor x, 0 => y = copy x
128281ad6265SDimitry Andric       MI->removeOperand(Src1Idx);
12830b57cec5SDimitry Andric       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
12840b57cec5SDimitry Andric       return true;
12850b57cec5SDimitry Andric     }
12860b57cec5SDimitry Andric   }
12870b57cec5SDimitry Andric 
12880b57cec5SDimitry Andric   return false;
12890b57cec5SDimitry Andric }
12900b57cec5SDimitry Andric 
12910b57cec5SDimitry Andric // Try to fold an instruction into a simpler one
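// A v_cndmask whose two value sources are identical (possibly via
// materialized immediates) is a no-op select, e.g. (illustrative MIR):
//   %2 = V_CNDMASK_B32_e64 0, %0, 0, %0, %1  -->  %2 = COPY %0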
1292fe6060f1SDimitry Andric bool SIFoldOperands::tryFoldCndMask(MachineInstr &MI) const {
1293fe6060f1SDimitry Andric   unsigned Opc = MI.getOpcode();
1294fe6060f1SDimitry Andric   if (Opc != AMDGPU::V_CNDMASK_B32_e32 && Opc != AMDGPU::V_CNDMASK_B32_e64 &&
1295fe6060f1SDimitry Andric       Opc != AMDGPU::V_CNDMASK_B64_PSEUDO)
1296fe6060f1SDimitry Andric     return false;
12970b57cec5SDimitry Andric 
1298fe6060f1SDimitry Andric   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1299fe6060f1SDimitry Andric   MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1300fe6060f1SDimitry Andric   if (!Src1->isIdenticalTo(*Src0)) {
1301bdd1243dSDimitry Andric     auto *Src0Imm = getImmOrMaterializedImm(*Src0);
1302bdd1243dSDimitry Andric     auto *Src1Imm = getImmOrMaterializedImm(*Src1);
1303fe6060f1SDimitry Andric     if (!Src1Imm->isIdenticalTo(*Src0Imm))
1304fe6060f1SDimitry Andric       return false;
1305fe6060f1SDimitry Andric   }
1306fe6060f1SDimitry Andric 
1307fe6060f1SDimitry Andric   int Src1ModIdx =
1308fe6060f1SDimitry Andric       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
1309fe6060f1SDimitry Andric   int Src0ModIdx =
1310fe6060f1SDimitry Andric       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
1311fe6060f1SDimitry Andric   if ((Src1ModIdx != -1 && MI.getOperand(Src1ModIdx).getImm() != 0) ||
1312fe6060f1SDimitry Andric       (Src0ModIdx != -1 && MI.getOperand(Src0ModIdx).getImm() != 0))
1313fe6060f1SDimitry Andric     return false;
1314fe6060f1SDimitry Andric 
1315fe6060f1SDimitry Andric   LLVM_DEBUG(dbgs() << "Folded " << MI << " into ");
13160b57cec5SDimitry Andric   auto &NewDesc =
13170b57cec5SDimitry Andric       TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
13180b57cec5SDimitry Andric   int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
13190b57cec5SDimitry Andric   if (Src2Idx != -1)
132081ad6265SDimitry Andric     MI.removeOperand(Src2Idx);
132181ad6265SDimitry Andric   MI.removeOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
13220b57cec5SDimitry Andric   if (Src1ModIdx != -1)
132381ad6265SDimitry Andric     MI.removeOperand(Src1ModIdx);
13240b57cec5SDimitry Andric   if (Src0ModIdx != -1)
132581ad6265SDimitry Andric     MI.removeOperand(Src0ModIdx);
1326fe6060f1SDimitry Andric   mutateCopyOp(MI, NewDesc);
1327fe6060f1SDimitry Andric   LLVM_DEBUG(dbgs() << MI);
13280b57cec5SDimitry Andric   return true;
13290b57cec5SDimitry Andric }
1330fe6060f1SDimitry Andric 
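// Fold out a v_and_b32 with a 0xffff mask when the instruction defining the
// other operand is already known to zero the high 16 bits of its result; the
// AND is then redundant and its uses are rewritten to read that operand.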
1331fe6060f1SDimitry Andric bool SIFoldOperands::tryFoldZeroHighBits(MachineInstr &MI) const {
1332fe6060f1SDimitry Andric   if (MI.getOpcode() != AMDGPU::V_AND_B32_e64 &&
1333fe6060f1SDimitry Andric       MI.getOpcode() != AMDGPU::V_AND_B32_e32)
1334fe6060f1SDimitry Andric     return false;
1335fe6060f1SDimitry Andric 
1336bdd1243dSDimitry Andric   MachineOperand *Src0 = getImmOrMaterializedImm(MI.getOperand(1));
1337fe6060f1SDimitry Andric   if (!Src0->isImm() || Src0->getImm() != 0xffff)
1338fe6060f1SDimitry Andric     return false;
1339fe6060f1SDimitry Andric 
1340fe6060f1SDimitry Andric   Register Src1 = MI.getOperand(2).getReg();
1341fe6060f1SDimitry Andric   MachineInstr *SrcDef = MRI->getVRegDef(Src1);
1342bdd1243dSDimitry Andric   if (!ST->zeroesHigh16BitsOfDest(SrcDef->getOpcode()))
1343bdd1243dSDimitry Andric     return false;
1344bdd1243dSDimitry Andric 
1345fe6060f1SDimitry Andric   Register Dst = MI.getOperand(0).getReg();
1346fe6060f1SDimitry Andric   MRI->replaceRegWith(Dst, SrcDef->getOperand(0).getReg());
1347fe6060f1SDimitry Andric   MI.eraseFromParent();
1348fe6060f1SDimitry Andric   return true;
13490b57cec5SDimitry Andric }
13500b57cec5SDimitry Andric 
135181ad6265SDimitry Andric bool SIFoldOperands::foldInstOperand(MachineInstr &MI,
13520b57cec5SDimitry Andric                                      MachineOperand &OpToFold) const {
13530b57cec5SDimitry Andric   // We need to mutate the operands of new mov instructions to add implicit
13540b57cec5SDimitry Andric   // uses of EXEC, but adding them invalidates the use_iterator, so defer
13550b57cec5SDimitry Andric   // this.
13560b57cec5SDimitry Andric   SmallVector<MachineInstr *, 4> CopiesToReplace;
13570b57cec5SDimitry Andric   SmallVector<FoldCandidate, 4> FoldList;
13580b57cec5SDimitry Andric   MachineOperand &Dst = MI.getOperand(0);
135981ad6265SDimitry Andric   bool Changed = false;
13600b57cec5SDimitry Andric 
1361fe6060f1SDimitry Andric   if (OpToFold.isImm()) {
1362fe6060f1SDimitry Andric     for (auto &UseMI :
1363fe6060f1SDimitry Andric          make_early_inc_range(MRI->use_nodbg_instructions(Dst.getReg()))) {
13640b57cec5SDimitry Andric       // Folding the immediate may reveal operations that can be constant
13650b57cec5SDimitry Andric       // folded or replaced with a copy. This can happen for example after
13660b57cec5SDimitry Andric       // frame indices are lowered to constants or from splitting 64-bit
13670b57cec5SDimitry Andric       // constants.
13680b57cec5SDimitry Andric       //
13690b57cec5SDimitry Andric       // We may also encounter cases where one or both operands are
13700b57cec5SDimitry Andric       // immediates materialized into a register, which would ordinarily not
13710b57cec5SDimitry Andric       // be folded due to multiple uses or operand constraints.
1372bdd1243dSDimitry Andric       if (tryConstantFoldOp(&UseMI)) {
1373fe6060f1SDimitry Andric         LLVM_DEBUG(dbgs() << "Constant folded " << UseMI);
137481ad6265SDimitry Andric         Changed = true;
13750b57cec5SDimitry Andric       }
13760b57cec5SDimitry Andric     }
13770b57cec5SDimitry Andric   }
13780b57cec5SDimitry Andric 
1379fe6060f1SDimitry Andric   SmallVector<MachineOperand *, 4> UsesToProcess;
1380fe6060f1SDimitry Andric   for (auto &Use : MRI->use_nodbg_operands(Dst.getReg()))
1381fe6060f1SDimitry Andric     UsesToProcess.push_back(&Use);
1382bdd1243dSDimitry Andric   for (auto *U : UsesToProcess) {
13830b57cec5SDimitry Andric     MachineInstr *UseMI = U->getParent();
138481ad6265SDimitry Andric     foldOperand(OpToFold, UseMI, UseMI->getOperandNo(U), FoldList,
138581ad6265SDimitry Andric                 CopiesToReplace);
138681ad6265SDimitry Andric   }
13870b57cec5SDimitry Andric 
138881ad6265SDimitry Andric   if (CopiesToReplace.empty() && FoldList.empty())
138981ad6265SDimitry Andric     return Changed;
13900b57cec5SDimitry Andric 
13910b57cec5SDimitry Andric   MachineFunction *MF = MI.getParent()->getParent();
13920b57cec5SDimitry Andric   // Make sure we add EXEC uses to any new v_mov instructions created.
13930b57cec5SDimitry Andric   for (MachineInstr *Copy : CopiesToReplace)
13940b57cec5SDimitry Andric     Copy->addImplicitDefUseOperands(*MF);
13950b57cec5SDimitry Andric 
13960b57cec5SDimitry Andric   for (FoldCandidate &Fold : FoldList) {
1397480093f4SDimitry Andric     assert(!Fold.isReg() || Fold.OpToFold);
1398e8d8bef9SDimitry Andric     if (Fold.isReg() && Fold.OpToFold->getReg().isVirtual()) {
13998bcb0991SDimitry Andric       Register Reg = Fold.OpToFold->getReg();
14008bcb0991SDimitry Andric       MachineInstr *DefMI = Fold.OpToFold->getParent();
14018bcb0991SDimitry Andric       if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
14028bcb0991SDimitry Andric           execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
14038bcb0991SDimitry Andric         continue;
14048bcb0991SDimitry Andric     }
1405bdd1243dSDimitry Andric     if (updateOperand(Fold)) {
14060b57cec5SDimitry Andric       // Clear kill flags.
14070b57cec5SDimitry Andric       if (Fold.isReg()) {
14080b57cec5SDimitry Andric         assert(Fold.OpToFold && Fold.OpToFold->isReg());
14090b57cec5SDimitry Andric         // FIXME: Probably shouldn't bother trying to fold if not an
14100b57cec5SDimitry Andric         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
14110b57cec5SDimitry Andric         // copies.
14120b57cec5SDimitry Andric         MRI->clearKillFlags(Fold.OpToFold->getReg());
14130b57cec5SDimitry Andric       }
14140b57cec5SDimitry Andric       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
14150b57cec5SDimitry Andric                         << static_cast<int>(Fold.UseOpNo) << " of "
1416fe6060f1SDimitry Andric                         << *Fold.UseMI);
1417bdd1243dSDimitry Andric     } else if (Fold.Commuted) {
14180b57cec5SDimitry Andric       // Restoring instruction's original operand order if fold has failed.
14190b57cec5SDimitry Andric       TII->commuteInstruction(*Fold.UseMI, false);
14200b57cec5SDimitry Andric     }
14210b57cec5SDimitry Andric   }
142281ad6265SDimitry Andric   return true;
14230b57cec5SDimitry Andric }
14240b57cec5SDimitry Andric 
1425bdd1243dSDimitry Andric bool SIFoldOperands::tryFoldFoldableCopy(
1426bdd1243dSDimitry Andric     MachineInstr &MI, MachineOperand *&CurrentKnownM0Val) const {
1427bdd1243dSDimitry Andric   // Specially track simple redefs of m0 to the same value in a block, so we
1428bdd1243dSDimitry Andric   // can erase the later ones.
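  // e.g. the second of two identical "$m0 = S_MOV_B32 %0" defs in a block,
  // with no intervening redefinition, is redundant and gets erased.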
1429bdd1243dSDimitry Andric   if (MI.getOperand(0).getReg() == AMDGPU::M0) {
1430bdd1243dSDimitry Andric     MachineOperand &NewM0Val = MI.getOperand(1);
1431bdd1243dSDimitry Andric     if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
1432bdd1243dSDimitry Andric       MI.eraseFromParent();
1433bdd1243dSDimitry Andric       return true;
1434bdd1243dSDimitry Andric     }
1435bdd1243dSDimitry Andric 
1436bdd1243dSDimitry Andric     // We aren't tracking other physical registers
1437bdd1243dSDimitry Andric     CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical())
1438bdd1243dSDimitry Andric                             ? nullptr
1439bdd1243dSDimitry Andric                             : &NewM0Val;
1440bdd1243dSDimitry Andric     return false;
1441bdd1243dSDimitry Andric   }
1442bdd1243dSDimitry Andric 
1443bdd1243dSDimitry Andric   MachineOperand &OpToFold = MI.getOperand(1);
1444bdd1243dSDimitry Andric   bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
1445bdd1243dSDimitry Andric 
1446bdd1243dSDimitry Andric   // FIXME: We could also be folding things like TargetIndexes.
1447bdd1243dSDimitry Andric   if (!FoldingImm && !OpToFold.isReg())
1448bdd1243dSDimitry Andric     return false;
1449bdd1243dSDimitry Andric 
1450bdd1243dSDimitry Andric   if (OpToFold.isReg() && !OpToFold.getReg().isVirtual())
1451bdd1243dSDimitry Andric     return false;
1452bdd1243dSDimitry Andric 
1453bdd1243dSDimitry Andric   // Prevent folding operands backwards in the function. For example,
1454bdd1243dSDimitry Andric   // the COPY opcode must not be replaced by 1 in this example:
1455bdd1243dSDimitry Andric   //
1456bdd1243dSDimitry Andric   //    %3 = COPY %vgpr0; VGPR_32:%3
1457bdd1243dSDimitry Andric   //    ...
1458bdd1243dSDimitry Andric   //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
1459bdd1243dSDimitry Andric   if (!MI.getOperand(0).getReg().isVirtual())
1460bdd1243dSDimitry Andric     return false;
1461bdd1243dSDimitry Andric 
1462bdd1243dSDimitry Andric   bool Changed = foldInstOperand(MI, OpToFold);
1463bdd1243dSDimitry Andric 
1464bdd1243dSDimitry Andric   // If we managed to fold all uses of this copy then we might as well
1465bdd1243dSDimitry Andric   // delete it now.
1466bdd1243dSDimitry Andric   // The only reason we need to follow chains of copies here is that
1467bdd1243dSDimitry Andric   // tryFoldRegSequence looks forward through copies before folding a
1468bdd1243dSDimitry Andric   // REG_SEQUENCE into its eventual users.
1469bdd1243dSDimitry Andric   auto *InstToErase = &MI;
1470bdd1243dSDimitry Andric   while (MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg())) {
1471bdd1243dSDimitry Andric     auto &SrcOp = InstToErase->getOperand(1);
1472bdd1243dSDimitry Andric     auto SrcReg = SrcOp.isReg() ? SrcOp.getReg() : Register();
1473bdd1243dSDimitry Andric     InstToErase->eraseFromParent();
1474bdd1243dSDimitry Andric     Changed = true;
1475bdd1243dSDimitry Andric     InstToErase = nullptr;
1476bdd1243dSDimitry Andric     if (!SrcReg || SrcReg.isPhysical())
1477bdd1243dSDimitry Andric       break;
1478bdd1243dSDimitry Andric     InstToErase = MRI->getVRegDef(SrcReg);
1479bdd1243dSDimitry Andric     if (!InstToErase || !TII->isFoldableCopy(*InstToErase))
1480bdd1243dSDimitry Andric       break;
1481bdd1243dSDimitry Andric   }
1482bdd1243dSDimitry Andric 
1483bdd1243dSDimitry Andric   if (InstToErase && InstToErase->isRegSequence() &&
1484bdd1243dSDimitry Andric       MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg())) {
1485bdd1243dSDimitry Andric     InstToErase->eraseFromParent();
1486bdd1243dSDimitry Andric     Changed = true;
1487bdd1243dSDimitry Andric   }
1488bdd1243dSDimitry Andric 
1489bdd1243dSDimitry Andric   return Changed;
1490bdd1243dSDimitry Andric }
1491bdd1243dSDimitry Andric 
14920b57cec5SDimitry Andric // Clamp patterns are canonically selected to v_max_* instructions, so only
14930b57cec5SDimitry Andric // handle them.
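// e.g. (illustrative MIR, modifier operands elided): given the single use
//   %1 = V_ADD_F32_e64 %0, %0
//   %2 = V_MAX_F32_e64 %1, %1, clamp
// tryFoldClamp below sets the clamp bit on the add and replaces all uses of
// %2 with %1.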
14940b57cec5SDimitry Andric const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
14950b57cec5SDimitry Andric   unsigned Op = MI.getOpcode();
14960b57cec5SDimitry Andric   switch (Op) {
14970b57cec5SDimitry Andric   case AMDGPU::V_MAX_F32_e64:
14980b57cec5SDimitry Andric   case AMDGPU::V_MAX_F16_e64:
1499bdd1243dSDimitry Andric   case AMDGPU::V_MAX_F16_t16_e64:
15005f757f3fSDimitry Andric   case AMDGPU::V_MAX_F16_fake16_e64:
1501e8d8bef9SDimitry Andric   case AMDGPU::V_MAX_F64_e64:
15027a6dacacSDimitry Andric   case AMDGPU::V_MAX_NUM_F64_e64:
15030b57cec5SDimitry Andric   case AMDGPU::V_PK_MAX_F16: {
15040b57cec5SDimitry Andric     if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
15050b57cec5SDimitry Andric       return nullptr;
15060b57cec5SDimitry Andric 
15070b57cec5SDimitry Andric     // Make sure sources are identical.
15080b57cec5SDimitry Andric     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
15090b57cec5SDimitry Andric     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
15100b57cec5SDimitry Andric     if (!Src0->isReg() || !Src1->isReg() ||
15110b57cec5SDimitry Andric         Src0->getReg() != Src1->getReg() ||
15120b57cec5SDimitry Andric         Src0->getSubReg() != Src1->getSubReg() ||
15130b57cec5SDimitry Andric         Src0->getSubReg() != AMDGPU::NoSubRegister)
15140b57cec5SDimitry Andric       return nullptr;
15150b57cec5SDimitry Andric 
15160b57cec5SDimitry Andric     // Can't fold up if we have modifiers.
15170b57cec5SDimitry Andric     if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
15180b57cec5SDimitry Andric       return nullptr;
15190b57cec5SDimitry Andric 
15200b57cec5SDimitry Andric     unsigned Src0Mods
15210b57cec5SDimitry Andric       = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
15220b57cec5SDimitry Andric     unsigned Src1Mods
15230b57cec5SDimitry Andric       = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();
15240b57cec5SDimitry Andric 
15250b57cec5SDimitry Andric     // Having a 0 op_sel_hi would require swizzling the output in the source
15260b57cec5SDimitry Andric     // instruction, which we can't do.
15270b57cec5SDimitry Andric     unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
15280b57cec5SDimitry Andric                                                       : 0u;
15290b57cec5SDimitry Andric     if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
15300b57cec5SDimitry Andric       return nullptr;
15310b57cec5SDimitry Andric     return Src0;
15320b57cec5SDimitry Andric   }
15330b57cec5SDimitry Andric   default:
15340b57cec5SDimitry Andric     return nullptr;
15350b57cec5SDimitry Andric   }
15360b57cec5SDimitry Andric }
15370b57cec5SDimitry Andric 
15380b57cec5SDimitry Andric // FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
15390b57cec5SDimitry Andric bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
15400b57cec5SDimitry Andric   const MachineOperand *ClampSrc = isClamp(MI);
1541fe6060f1SDimitry Andric   if (!ClampSrc || !MRI->hasOneNonDBGUser(ClampSrc->getReg()))
15420b57cec5SDimitry Andric     return false;
15430b57cec5SDimitry Andric 
15440b57cec5SDimitry Andric   MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
15450b57cec5SDimitry Andric 
15460b57cec5SDimitry Andric   // The type of clamp must be compatible.
15470b57cec5SDimitry Andric   if (TII->getClampMask(*Def) != TII->getClampMask(MI))
15480b57cec5SDimitry Andric     return false;
15490b57cec5SDimitry Andric 
15500b57cec5SDimitry Andric   MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
15510b57cec5SDimitry Andric   if (!DefClamp)
15520b57cec5SDimitry Andric     return false;
15530b57cec5SDimitry Andric 
1554fe6060f1SDimitry Andric   LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def);
15550b57cec5SDimitry Andric 
15560b57cec5SDimitry Andric   // Clamp is applied after omod, so it is OK if omod is set.
15570b57cec5SDimitry Andric   DefClamp->setImm(1);
15580b57cec5SDimitry Andric   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
15590b57cec5SDimitry Andric   MI.eraseFromParent();
1560349cc55cSDimitry Andric 
1561349cc55cSDimitry Andric   // Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac
1562349cc55cSDimitry Andric   // instruction, so we might as well convert it to the more flexible VOP3-only
1563349cc55cSDimitry Andric   // mad/fma form.
1564349cc55cSDimitry Andric   if (TII->convertToThreeAddress(*Def, nullptr, nullptr))
1565349cc55cSDimitry Andric     Def->eraseFromParent();
1566349cc55cSDimitry Andric 
15670b57cec5SDimitry Andric   return true;
15680b57cec5SDimitry Andric }
15690b57cec5SDimitry Andric 
15700b57cec5SDimitry Andric static int getOModValue(unsigned Opc, int64_t Val) {
15710b57cec5SDimitry Andric   switch (Opc) {
15727a6dacacSDimitry Andric   case AMDGPU::V_MUL_F64_e64:
15737a6dacacSDimitry Andric   case AMDGPU::V_MUL_F64_pseudo_e64: {
1574fe6060f1SDimitry Andric     switch (Val) {
1575fe6060f1SDimitry Andric     case 0x3fe0000000000000: // 0.5
1576fe6060f1SDimitry Andric       return SIOutMods::DIV2;
1577fe6060f1SDimitry Andric     case 0x4000000000000000: // 2.0
1578fe6060f1SDimitry Andric       return SIOutMods::MUL2;
1579fe6060f1SDimitry Andric     case 0x4010000000000000: // 4.0
1580fe6060f1SDimitry Andric       return SIOutMods::MUL4;
1581fe6060f1SDimitry Andric     default:
1582fe6060f1SDimitry Andric       return SIOutMods::NONE;
1583fe6060f1SDimitry Andric     }
1584fe6060f1SDimitry Andric   }
15850b57cec5SDimitry Andric   case AMDGPU::V_MUL_F32_e64: {
15860b57cec5SDimitry Andric     switch (static_cast<uint32_t>(Val)) {
15870b57cec5SDimitry Andric     case 0x3f000000: // 0.5
15880b57cec5SDimitry Andric       return SIOutMods::DIV2;
15890b57cec5SDimitry Andric     case 0x40000000: // 2.0
15900b57cec5SDimitry Andric       return SIOutMods::MUL2;
15910b57cec5SDimitry Andric     case 0x40800000: // 4.0
15920b57cec5SDimitry Andric       return SIOutMods::MUL4;
15930b57cec5SDimitry Andric     default:
15940b57cec5SDimitry Andric       return SIOutMods::NONE;
15950b57cec5SDimitry Andric     }
15960b57cec5SDimitry Andric   }
1597bdd1243dSDimitry Andric   case AMDGPU::V_MUL_F16_e64:
15985f757f3fSDimitry Andric   case AMDGPU::V_MUL_F16_t16_e64:
15995f757f3fSDimitry Andric   case AMDGPU::V_MUL_F16_fake16_e64: {
16000b57cec5SDimitry Andric     switch (static_cast<uint16_t>(Val)) {
16010b57cec5SDimitry Andric     case 0x3800: // 0.5
16020b57cec5SDimitry Andric       return SIOutMods::DIV2;
16030b57cec5SDimitry Andric     case 0x4000: // 2.0
16040b57cec5SDimitry Andric       return SIOutMods::MUL2;
16050b57cec5SDimitry Andric     case 0x4400: // 4.0
16060b57cec5SDimitry Andric       return SIOutMods::MUL4;
16070b57cec5SDimitry Andric     default:
16080b57cec5SDimitry Andric       return SIOutMods::NONE;
16090b57cec5SDimitry Andric     }
16100b57cec5SDimitry Andric   }
16110b57cec5SDimitry Andric   default:
16120b57cec5SDimitry Andric     llvm_unreachable("invalid mul opcode");
16130b57cec5SDimitry Andric   }
16140b57cec5SDimitry Andric }
16150b57cec5SDimitry Andric 
16160b57cec5SDimitry Andric // FIXME: Does this really not support denormals with f16?
16170b57cec5SDimitry Andric // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
16180b57cec5SDimitry Andric // handled, so will anything other than that break?
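// e.g. (illustrative MIR, modifier operands elided): for a single-use def,
//   %1 = V_ADD_F32_e64 %a, %b
//   %2 = V_MUL_F32_e64 %1, 2.0
// tryFoldOMod sets omod to MUL2 on the add and replaces %2's uses with %1.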
16190b57cec5SDimitry Andric std::pair<const MachineOperand *, int>
16200b57cec5SDimitry Andric SIFoldOperands::isOMod(const MachineInstr &MI) const {
16210b57cec5SDimitry Andric   unsigned Op = MI.getOpcode();
16220b57cec5SDimitry Andric   switch (Op) {
1623fe6060f1SDimitry Andric   case AMDGPU::V_MUL_F64_e64:
16247a6dacacSDimitry Andric   case AMDGPU::V_MUL_F64_pseudo_e64:
16250b57cec5SDimitry Andric   case AMDGPU::V_MUL_F32_e64:
1626bdd1243dSDimitry Andric   case AMDGPU::V_MUL_F16_t16_e64:
16275f757f3fSDimitry Andric   case AMDGPU::V_MUL_F16_fake16_e64:
16280b57cec5SDimitry Andric   case AMDGPU::V_MUL_F16_e64: {
16290b57cec5SDimitry Andric     // If output denormals are enabled, omod is ignored.
1630bdd1243dSDimitry Andric     if ((Op == AMDGPU::V_MUL_F32_e64 &&
1631bdd1243dSDimitry Andric          MFI->getMode().FP32Denormals.Output != DenormalMode::PreserveSign) ||
16327a6dacacSDimitry Andric         ((Op == AMDGPU::V_MUL_F64_e64 || Op == AMDGPU::V_MUL_F64_pseudo_e64 ||
16337a6dacacSDimitry Andric           Op == AMDGPU::V_MUL_F16_e64 || Op == AMDGPU::V_MUL_F16_t16_e64 ||
16345f757f3fSDimitry Andric           Op == AMDGPU::V_MUL_F16_fake16_e64) &&
1635bdd1243dSDimitry Andric          MFI->getMode().FP64FP16Denormals.Output != DenormalMode::PreserveSign))
1636bdd1243dSDimitry Andric       return std::pair(nullptr, SIOutMods::NONE);
16370b57cec5SDimitry Andric 
16380b57cec5SDimitry Andric     const MachineOperand *RegOp = nullptr;
16390b57cec5SDimitry Andric     const MachineOperand *ImmOp = nullptr;
16400b57cec5SDimitry Andric     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
16410b57cec5SDimitry Andric     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
16420b57cec5SDimitry Andric     if (Src0->isImm()) {
16430b57cec5SDimitry Andric       ImmOp = Src0;
16440b57cec5SDimitry Andric       RegOp = Src1;
16450b57cec5SDimitry Andric     } else if (Src1->isImm()) {
16460b57cec5SDimitry Andric       ImmOp = Src1;
16470b57cec5SDimitry Andric       RegOp = Src0;
16480b57cec5SDimitry Andric     } else
1649bdd1243dSDimitry Andric       return std::pair(nullptr, SIOutMods::NONE);
16500b57cec5SDimitry Andric 
16510b57cec5SDimitry Andric     int OMod = getOModValue(Op, ImmOp->getImm());
16520b57cec5SDimitry Andric     if (OMod == SIOutMods::NONE ||
16530b57cec5SDimitry Andric         TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
16540b57cec5SDimitry Andric         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
16550b57cec5SDimitry Andric         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
16560b57cec5SDimitry Andric         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
1657bdd1243dSDimitry Andric       return std::pair(nullptr, SIOutMods::NONE);
16580b57cec5SDimitry Andric 
1659bdd1243dSDimitry Andric     return std::pair(RegOp, OMod);
16600b57cec5SDimitry Andric   }
1661fe6060f1SDimitry Andric   case AMDGPU::V_ADD_F64_e64:
16627a6dacacSDimitry Andric   case AMDGPU::V_ADD_F64_pseudo_e64:
16630b57cec5SDimitry Andric   case AMDGPU::V_ADD_F32_e64:
1664bdd1243dSDimitry Andric   case AMDGPU::V_ADD_F16_e64:
16655f757f3fSDimitry Andric   case AMDGPU::V_ADD_F16_t16_e64:
16665f757f3fSDimitry Andric   case AMDGPU::V_ADD_F16_fake16_e64: {
16670b57cec5SDimitry Andric     // If output denormals are enabled, omod is ignored.
1668bdd1243dSDimitry Andric     if ((Op == AMDGPU::V_ADD_F32_e64 &&
1669bdd1243dSDimitry Andric          MFI->getMode().FP32Denormals.Output != DenormalMode::PreserveSign) ||
16707a6dacacSDimitry Andric         ((Op == AMDGPU::V_ADD_F64_e64 || Op == AMDGPU::V_ADD_F64_pseudo_e64 ||
16717a6dacacSDimitry Andric           Op == AMDGPU::V_ADD_F16_e64 || Op == AMDGPU::V_ADD_F16_t16_e64 ||
16725f757f3fSDimitry Andric           Op == AMDGPU::V_ADD_F16_fake16_e64) &&
1673bdd1243dSDimitry Andric          MFI->getMode().FP64FP16Denormals.Output != DenormalMode::PreserveSign))
1674bdd1243dSDimitry Andric       return std::pair(nullptr, SIOutMods::NONE);
16750b57cec5SDimitry Andric 
16760b57cec5SDimitry Andric     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
16770b57cec5SDimitry Andric     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
16780b57cec5SDimitry Andric     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
16790b57cec5SDimitry Andric 
16800b57cec5SDimitry Andric     if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
16810b57cec5SDimitry Andric         Src0->getSubReg() == Src1->getSubReg() &&
16820b57cec5SDimitry Andric         !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
16830b57cec5SDimitry Andric         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
16840b57cec5SDimitry Andric         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
16850b57cec5SDimitry Andric         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1686bdd1243dSDimitry Andric       return std::pair(Src0, SIOutMods::MUL2);
16870b57cec5SDimitry Andric 
1688bdd1243dSDimitry Andric     return std::pair(nullptr, SIOutMods::NONE);
16890b57cec5SDimitry Andric   }
16900b57cec5SDimitry Andric   default:
1691bdd1243dSDimitry Andric     return std::pair(nullptr, SIOutMods::NONE);
16920b57cec5SDimitry Andric   }
16930b57cec5SDimitry Andric }
16940b57cec5SDimitry Andric 
16950b57cec5SDimitry Andric // FIXME: Does this need to check IEEE bit on function?
16960b57cec5SDimitry Andric bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
16970b57cec5SDimitry Andric   const MachineOperand *RegOp;
16980b57cec5SDimitry Andric   int OMod;
16990b57cec5SDimitry Andric   std::tie(RegOp, OMod) = isOMod(MI);
17000b57cec5SDimitry Andric   if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
17010b57cec5SDimitry Andric       RegOp->getSubReg() != AMDGPU::NoSubRegister ||
1702fe6060f1SDimitry Andric       !MRI->hasOneNonDBGUser(RegOp->getReg()))
17030b57cec5SDimitry Andric     return false;
17040b57cec5SDimitry Andric 
17050b57cec5SDimitry Andric   MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
17060b57cec5SDimitry Andric   MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
17070b57cec5SDimitry Andric   if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
17080b57cec5SDimitry Andric     return false;
17090b57cec5SDimitry Andric 
17100b57cec5SDimitry Andric   // Clamp is applied after omod. If the source already has clamp set, don't
17110b57cec5SDimitry Andric   // fold it.
17120b57cec5SDimitry Andric   if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
17130b57cec5SDimitry Andric     return false;
17140b57cec5SDimitry Andric 
1715fe6060f1SDimitry Andric   LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def);
17160b57cec5SDimitry Andric 
17170b57cec5SDimitry Andric   DefOMod->setImm(OMod);
17180b57cec5SDimitry Andric   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
17190b57cec5SDimitry Andric   MI.eraseFromParent();
1720349cc55cSDimitry Andric 
1721349cc55cSDimitry Andric   // Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac
1722349cc55cSDimitry Andric   // instruction, so we might as well convert it to the more flexible VOP3-only
1723349cc55cSDimitry Andric   // mad/fma form.
1724349cc55cSDimitry Andric   if (TII->convertToThreeAddress(*Def, nullptr, nullptr))
1725349cc55cSDimitry Andric     Def->eraseFromParent();
1726349cc55cSDimitry Andric 
17270b57cec5SDimitry Andric   return true;
17280b57cec5SDimitry Andric }
17290b57cec5SDimitry Andric 
1730fe6060f1SDimitry Andric // Try to fold a reg_sequence with vgpr output and agpr inputs into an
1731fe6060f1SDimitry Andric // instruction which can take an agpr. So far that means a store.
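// e.g. (illustrative MIR):
//   %2:vreg_64 = REG_SEQUENCE %0:agpr_32, %subreg.sub0, %1:agpr_32, %subreg.sub1
//   GLOBAL_STORE_DWORDX2 %ptr, %2, 0, 0, implicit $exec
// The store's data operand accepts an AV register class, so %2 can be rebuilt
// as an areg_64 REG_SEQUENCE that the store reads directly.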
1732fe6060f1SDimitry Andric bool SIFoldOperands::tryFoldRegSequence(MachineInstr &MI) {
1733fe6060f1SDimitry Andric   assert(MI.isRegSequence());
1734fe6060f1SDimitry Andric   auto Reg = MI.getOperand(0).getReg();
1735fe6060f1SDimitry Andric 
1736fe6060f1SDimitry Andric   if (!ST->hasGFX90AInsts() || !TRI->isVGPR(*MRI, Reg) ||
1737fe6060f1SDimitry Andric       !MRI->hasOneNonDBGUse(Reg))
1738fe6060f1SDimitry Andric     return false;
1739fe6060f1SDimitry Andric 
1740fe6060f1SDimitry Andric   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
1741bdd1243dSDimitry Andric   if (!getRegSeqInit(Defs, Reg, MCOI::OPERAND_REGISTER))
1742fe6060f1SDimitry Andric     return false;
1743fe6060f1SDimitry Andric 
1744fe6060f1SDimitry Andric   for (auto &Def : Defs) {
1745fe6060f1SDimitry Andric     const auto *Op = Def.first;
1746fe6060f1SDimitry Andric     if (!Op->isReg())
1747fe6060f1SDimitry Andric       return false;
1748fe6060f1SDimitry Andric     if (TRI->isAGPR(*MRI, Op->getReg()))
1749fe6060f1SDimitry Andric       continue;
1750fe6060f1SDimitry Andric     // Maybe this is a COPY from AREG
1751fe6060f1SDimitry Andric     const MachineInstr *SubDef = MRI->getVRegDef(Op->getReg());
1752fe6060f1SDimitry Andric     if (!SubDef || !SubDef->isCopy() || SubDef->getOperand(1).getSubReg())
1753fe6060f1SDimitry Andric       return false;
1754fe6060f1SDimitry Andric     if (!TRI->isAGPR(*MRI, SubDef->getOperand(1).getReg()))
1755fe6060f1SDimitry Andric       return false;
1756fe6060f1SDimitry Andric   }
1757fe6060f1SDimitry Andric 
1758fe6060f1SDimitry Andric   MachineOperand *Op = &*MRI->use_nodbg_begin(Reg);
1759fe6060f1SDimitry Andric   MachineInstr *UseMI = Op->getParent();
1760fe6060f1SDimitry Andric   while (UseMI->isCopy() && !Op->getSubReg()) {
1761fe6060f1SDimitry Andric     Reg = UseMI->getOperand(0).getReg();
1762fe6060f1SDimitry Andric     if (!TRI->isVGPR(*MRI, Reg) || !MRI->hasOneNonDBGUse(Reg))
1763fe6060f1SDimitry Andric       return false;
1764fe6060f1SDimitry Andric     Op = &*MRI->use_nodbg_begin(Reg);
1765fe6060f1SDimitry Andric     UseMI = Op->getParent();
1766fe6060f1SDimitry Andric   }
1767fe6060f1SDimitry Andric 
1768fe6060f1SDimitry Andric   if (Op->getSubReg())
1769fe6060f1SDimitry Andric     return false;
1770fe6060f1SDimitry Andric 
1771fe6060f1SDimitry Andric   unsigned OpIdx = Op - &UseMI->getOperand(0);
1772fe6060f1SDimitry Andric   const MCInstrDesc &InstDesc = UseMI->getDesc();
177381ad6265SDimitry Andric   const TargetRegisterClass *OpRC =
177481ad6265SDimitry Andric       TII->getRegClass(InstDesc, OpIdx, TRI, *MI.getMF());
177581ad6265SDimitry Andric   if (!OpRC || !TRI->isVectorSuperClass(OpRC))
1776fe6060f1SDimitry Andric     return false;
1777fe6060f1SDimitry Andric 
1778fe6060f1SDimitry Andric   const auto *NewDstRC = TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg));
1779fe6060f1SDimitry Andric   auto Dst = MRI->createVirtualRegister(NewDstRC);
1780fe6060f1SDimitry Andric   auto RS = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1781fe6060f1SDimitry Andric                     TII->get(AMDGPU::REG_SEQUENCE), Dst);
1782fe6060f1SDimitry Andric 
1783fe6060f1SDimitry Andric   for (unsigned I = 0; I < Defs.size(); ++I) {
1784fe6060f1SDimitry Andric     MachineOperand *Def = Defs[I].first;
1785fe6060f1SDimitry Andric     Def->setIsKill(false);
1786fe6060f1SDimitry Andric     if (TRI->isAGPR(*MRI, Def->getReg())) {
1787fe6060f1SDimitry Andric       RS.add(*Def);
1788fe6060f1SDimitry Andric     } else { // This is a copy
1789fe6060f1SDimitry Andric       MachineInstr *SubDef = MRI->getVRegDef(Def->getReg());
1790fe6060f1SDimitry Andric       SubDef->getOperand(1).setIsKill(false);
1791fe6060f1SDimitry Andric       RS.addReg(SubDef->getOperand(1).getReg(), 0, Def->getSubReg());
1792fe6060f1SDimitry Andric     }
1793fe6060f1SDimitry Andric     RS.addImm(Defs[I].second);
1794fe6060f1SDimitry Andric   }
1795fe6060f1SDimitry Andric 
1796fe6060f1SDimitry Andric   Op->setReg(Dst);
1797fe6060f1SDimitry Andric   if (!TII->isOperandLegal(*UseMI, OpIdx, Op)) {
1798fe6060f1SDimitry Andric     Op->setReg(Reg);
1799fe6060f1SDimitry Andric     RS->eraseFromParent();
1800fe6060f1SDimitry Andric     return false;
1801fe6060f1SDimitry Andric   }
1802fe6060f1SDimitry Andric 
1803fe6060f1SDimitry Andric   LLVM_DEBUG(dbgs() << "Folded " << *RS << " into " << *UseMI);
1804fe6060f1SDimitry Andric 
1805fe6060f1SDimitry Andric   // Erase the REG_SEQUENCE eagerly, unless we followed a chain of COPY users,
1806fe6060f1SDimitry Andric   // in which case we can erase them all later in runOnMachineFunction.
1807fe6060f1SDimitry Andric   if (MRI->use_nodbg_empty(MI.getOperand(0).getReg()))
18080eae32dcSDimitry Andric     MI.eraseFromParent();
1809fe6060f1SDimitry Andric   return true;
1810fe6060f1SDimitry Andric }
1811fe6060f1SDimitry Andric 
181206c3fb27SDimitry Andric /// Checks whether \p Copy is an AGPR -> VGPR copy. Returns `true` on success and
181306c3fb27SDimitry Andric /// stores the AGPR register in \p OutReg and the subreg in \p OutSubReg.
181406c3fb27SDimitry Andric static bool isAGPRCopy(const SIRegisterInfo &TRI,
181506c3fb27SDimitry Andric                        const MachineRegisterInfo &MRI, const MachineInstr &Copy,
181606c3fb27SDimitry Andric                        Register &OutReg, unsigned &OutSubReg) {
181706c3fb27SDimitry Andric   assert(Copy.isCopy());
181806c3fb27SDimitry Andric 
181906c3fb27SDimitry Andric   const MachineOperand &CopySrc = Copy.getOperand(1);
182006c3fb27SDimitry Andric   Register CopySrcReg = CopySrc.getReg();
182106c3fb27SDimitry Andric   if (!CopySrcReg.isVirtual())
182206c3fb27SDimitry Andric     return false;
182306c3fb27SDimitry Andric 
182406c3fb27SDimitry Andric   // Common case: copy from AGPR directly, e.g.
182506c3fb27SDimitry Andric   //  %1:vgpr_32 = COPY %0:agpr_32
182606c3fb27SDimitry Andric   if (TRI.isAGPR(MRI, CopySrcReg)) {
182706c3fb27SDimitry Andric     OutReg = CopySrcReg;
182806c3fb27SDimitry Andric     OutSubReg = CopySrc.getSubReg();
182906c3fb27SDimitry Andric     return true;
183006c3fb27SDimitry Andric   }
183106c3fb27SDimitry Andric 
183206c3fb27SDimitry Andric   // Sometimes it can also involve two copies, e.g.
183306c3fb27SDimitry Andric   //  %1:vgpr_256 = COPY %0:agpr_256
183406c3fb27SDimitry Andric   //  %2:vgpr_32 = COPY %1:vgpr_256.sub0
183506c3fb27SDimitry Andric   const MachineInstr *CopySrcDef = MRI.getVRegDef(CopySrcReg);
183606c3fb27SDimitry Andric   if (!CopySrcDef || !CopySrcDef->isCopy())
183706c3fb27SDimitry Andric     return false;
183806c3fb27SDimitry Andric 
183906c3fb27SDimitry Andric   const MachineOperand &OtherCopySrc = CopySrcDef->getOperand(1);
184006c3fb27SDimitry Andric   Register OtherCopySrcReg = OtherCopySrc.getReg();
184106c3fb27SDimitry Andric   if (!OtherCopySrcReg.isVirtual() ||
184206c3fb27SDimitry Andric       CopySrcDef->getOperand(0).getSubReg() != AMDGPU::NoSubRegister ||
184306c3fb27SDimitry Andric       OtherCopySrc.getSubReg() != AMDGPU::NoSubRegister ||
184406c3fb27SDimitry Andric       !TRI.isAGPR(MRI, OtherCopySrcReg))
184506c3fb27SDimitry Andric     return false;
184606c3fb27SDimitry Andric 
184706c3fb27SDimitry Andric   OutReg = OtherCopySrcReg;
184806c3fb27SDimitry Andric   OutSubReg = CopySrc.getSubReg();
184906c3fb27SDimitry Andric   return true;
185006c3fb27SDimitry Andric }
185106c3fb27SDimitry Andric 
185206c3fb27SDimitry Andric // Try to hoist an AGPR to VGPR copy across a PHI.
1853fe6060f1SDimitry Andric // This should allow folding of an AGPR into a consumer which may support it.
1854fe6060f1SDimitry Andric //
185506c3fb27SDimitry Andric // Example 1: LCSSA PHI
185606c3fb27SDimitry Andric //      loop:
185706c3fb27SDimitry Andric //        %1:vreg = COPY %0:areg
185806c3fb27SDimitry Andric //      exit:
185906c3fb27SDimitry Andric //        %2:vreg = PHI %1:vreg, %loop
186006c3fb27SDimitry Andric //  =>
186106c3fb27SDimitry Andric //      loop:
186206c3fb27SDimitry Andric //      exit:
186306c3fb27SDimitry Andric //        %1:areg = PHI %0:areg, %loop
186406c3fb27SDimitry Andric //        %2:vreg = COPY %1:areg
186506c3fb27SDimitry Andric //
186606c3fb27SDimitry Andric // Example 2: PHI with multiple incoming values:
186706c3fb27SDimitry Andric //      entry:
186806c3fb27SDimitry Andric //        %1:vreg = GLOBAL_LOAD(..)
186906c3fb27SDimitry Andric //      loop:
187006c3fb27SDimitry Andric //        %2:vreg = PHI %1:vreg, %entry, %5:vreg, %loop
187106c3fb27SDimitry Andric //        %3:areg = COPY %2:vreg
187206c3fb27SDimitry Andric //        %4:areg = (instr using %3:areg)
187306c3fb27SDimitry Andric //        %5:vreg = COPY %4:areg
187406c3fb27SDimitry Andric //  =>
187506c3fb27SDimitry Andric //      entry:
187606c3fb27SDimitry Andric //        %1:vreg = GLOBAL_LOAD(..)
187706c3fb27SDimitry Andric //        %2:areg = COPY %1:vreg
187806c3fb27SDimitry Andric //      loop:
187906c3fb27SDimitry Andric //        %3:areg = PHI %2:areg, %entry, %X:areg, %loop
188006c3fb27SDimitry Andric //        %4:areg = (instr using %3:areg)
188106c3fb27SDimitry Andric bool SIFoldOperands::tryFoldPhiAGPR(MachineInstr &PHI) {
1882fe6060f1SDimitry Andric   assert(PHI.isPHI());
1883fe6060f1SDimitry Andric 
1884fe6060f1SDimitry Andric   Register PhiOut = PHI.getOperand(0).getReg();
188506c3fb27SDimitry Andric   if (!TRI->isVGPR(*MRI, PhiOut))
1886fe6060f1SDimitry Andric     return false;
1887fe6060f1SDimitry Andric 
188806c3fb27SDimitry Andric   // Iterate once over all incoming values of the PHI to check if this PHI is
188906c3fb27SDimitry Andric   // eligible, and determine the exact AGPR RC we'll target.
189006c3fb27SDimitry Andric   const TargetRegisterClass *ARC = nullptr;
189106c3fb27SDimitry Andric   for (unsigned K = 1; K < PHI.getNumExplicitOperands(); K += 2) {
189206c3fb27SDimitry Andric     MachineOperand &MO = PHI.getOperand(K);
189306c3fb27SDimitry Andric     MachineInstr *Copy = MRI->getVRegDef(MO.getReg());
1894fe6060f1SDimitry Andric     if (!Copy || !Copy->isCopy())
189506c3fb27SDimitry Andric       continue;
189606c3fb27SDimitry Andric 
189706c3fb27SDimitry Andric     Register AGPRSrc;
189806c3fb27SDimitry Andric     unsigned AGPRRegMask = AMDGPU::NoSubRegister;
189906c3fb27SDimitry Andric     if (!isAGPRCopy(*TRI, *MRI, *Copy, AGPRSrc, AGPRRegMask))
190006c3fb27SDimitry Andric       continue;
190106c3fb27SDimitry Andric 
190206c3fb27SDimitry Andric     const TargetRegisterClass *CopyInRC = MRI->getRegClass(AGPRSrc);
190306c3fb27SDimitry Andric     if (const auto *SubRC = TRI->getSubRegisterClass(CopyInRC, AGPRRegMask))
190406c3fb27SDimitry Andric       CopyInRC = SubRC;
190506c3fb27SDimitry Andric 
190606c3fb27SDimitry Andric     if (ARC && !ARC->hasSubClassEq(CopyInRC))
190706c3fb27SDimitry Andric       return false;
190806c3fb27SDimitry Andric     ARC = CopyInRC;
190906c3fb27SDimitry Andric   }
191006c3fb27SDimitry Andric 
191106c3fb27SDimitry Andric   if (!ARC)
1912fe6060f1SDimitry Andric     return false;
1913fe6060f1SDimitry Andric 
191406c3fb27SDimitry Andric   bool IsAGPR32 = (ARC == &AMDGPU::AGPR_32RegClass);
1915fe6060f1SDimitry Andric 
191606c3fb27SDimitry Andric   // Rewrite the PHI's incoming values to ARC.
191706c3fb27SDimitry Andric   LLVM_DEBUG(dbgs() << "Folding AGPR copies into: " << PHI);
191806c3fb27SDimitry Andric   for (unsigned K = 1; K < PHI.getNumExplicitOperands(); K += 2) {
191906c3fb27SDimitry Andric     MachineOperand &MO = PHI.getOperand(K);
192006c3fb27SDimitry Andric     Register Reg = MO.getReg();
192106c3fb27SDimitry Andric 
192206c3fb27SDimitry Andric     MachineBasicBlock::iterator InsertPt;
192306c3fb27SDimitry Andric     MachineBasicBlock *InsertMBB = nullptr;
192406c3fb27SDimitry Andric 
192506c3fb27SDimitry Andric     // Look at the def of Reg, ignoring all copies.
192606c3fb27SDimitry Andric     unsigned CopyOpc = AMDGPU::COPY;
192706c3fb27SDimitry Andric     if (MachineInstr *Def = MRI->getVRegDef(Reg)) {
192806c3fb27SDimitry Andric 
192906c3fb27SDimitry Andric       // Look at pre-existing COPY instructions from ARC: Steal the operand. If
193006c3fb27SDimitry Andric       // the copy was single-use, it will be removed by DCE later.
193106c3fb27SDimitry Andric       if (Def->isCopy()) {
193206c3fb27SDimitry Andric         Register AGPRSrc;
193306c3fb27SDimitry Andric         unsigned AGPRSubReg = AMDGPU::NoSubRegister;
193406c3fb27SDimitry Andric         if (isAGPRCopy(*TRI, *MRI, *Def, AGPRSrc, AGPRSubReg)) {
193506c3fb27SDimitry Andric           MO.setReg(AGPRSrc);
193606c3fb27SDimitry Andric           MO.setSubReg(AGPRSubReg);
193706c3fb27SDimitry Andric           continue;
193806c3fb27SDimitry Andric         }
193906c3fb27SDimitry Andric 
194006c3fb27SDimitry Andric         // If this is a multi-use SGPR -> VGPR copy, use V_ACCVGPR_WRITE on
194106c3fb27SDimitry Andric         // GFX908 directly instead of a COPY. Otherwise, SIFoldOperands may try
194206c3fb27SDimitry Andric         // to fold the sgpr -> vgpr -> agpr copy into an sgpr -> agpr copy, which
194306c3fb27SDimitry Andric         // is unlikely to be profitable.
194406c3fb27SDimitry Andric         //
194506c3fb27SDimitry Andric         // Note that V_ACCVGPR_WRITE is only used for AGPR_32.
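        // A hedged sketch of the intended result (names illustrative):
        //   %v:vgpr_32 = COPY %s:sgpr_32            ; multi-use copy, kept
        //   %a:agpr_32 = V_ACCVGPR_WRITE_B32_e64 %v ; built below for the PHI
        //   %p:agpr_32 = PHI %a, %bb, ...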
194606c3fb27SDimitry Andric         MachineOperand &CopyIn = Def->getOperand(1);
194706c3fb27SDimitry Andric         if (IsAGPR32 && !ST->hasGFX90AInsts() && !MRI->hasOneNonDBGUse(Reg) &&
194806c3fb27SDimitry Andric             TRI->isSGPRReg(*MRI, CopyIn.getReg()))
194906c3fb27SDimitry Andric           CopyOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
195006c3fb27SDimitry Andric       }
195106c3fb27SDimitry Andric 
195206c3fb27SDimitry Andric       InsertMBB = Def->getParent();
195306c3fb27SDimitry Andric       InsertPt = InsertMBB->SkipPHIsLabelsAndDebug(++Def->getIterator());
195406c3fb27SDimitry Andric     } else {
195506c3fb27SDimitry Andric       InsertMBB = PHI.getOperand(MO.getOperandNo() + 1).getMBB();
195606c3fb27SDimitry Andric       InsertPt = InsertMBB->getFirstTerminator();
195706c3fb27SDimitry Andric     }
195806c3fb27SDimitry Andric 
1959fe6060f1SDimitry Andric     Register NewReg = MRI->createVirtualRegister(ARC);
196006c3fb27SDimitry Andric     MachineInstr *MI = BuildMI(*InsertMBB, InsertPt, PHI.getDebugLoc(),
196106c3fb27SDimitry Andric                                TII->get(CopyOpc), NewReg)
196206c3fb27SDimitry Andric                            .addReg(Reg);
196306c3fb27SDimitry Andric     MO.setReg(NewReg);
196406c3fb27SDimitry Andric 
196506c3fb27SDimitry Andric     (void)MI;
196606c3fb27SDimitry Andric     LLVM_DEBUG(dbgs() << "  Created COPY: " << *MI);
196706c3fb27SDimitry Andric   }
196806c3fb27SDimitry Andric 
196906c3fb27SDimitry Andric   // Replace the PHI's result with a new register.
197006c3fb27SDimitry Andric   Register NewReg = MRI->createVirtualRegister(ARC);
1971fe6060f1SDimitry Andric   PHI.getOperand(0).setReg(NewReg);
1972fe6060f1SDimitry Andric 
197306c3fb27SDimitry Andric   // COPY that new register back to the original PhiOut register. This COPY will
197406c3fb27SDimitry Andric   // usually be folded out later.
1975fe6060f1SDimitry Andric   MachineBasicBlock *MBB = PHI.getParent();
197606c3fb27SDimitry Andric   BuildMI(*MBB, MBB->getFirstNonPHI(), PHI.getDebugLoc(),
1977fe6060f1SDimitry Andric           TII->get(AMDGPU::COPY), PhiOut)
197806c3fb27SDimitry Andric       .addReg(NewReg);
1979fe6060f1SDimitry Andric 
198006c3fb27SDimitry Andric   LLVM_DEBUG(dbgs() << "  Done: Folded " << PHI);
1981fe6060f1SDimitry Andric   return true;
1982fe6060f1SDimitry Andric }
1983fe6060f1SDimitry Andric 
1984fe6060f1SDimitry Andric // Attempt to convert VGPR load to an AGPR load.
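// A minimal before/after sketch (MIR, register names illustrative), assuming
// every use of the loaded value ends up in an AGPR:
//   %0:vgpr_32 = GLOBAL_LOAD_DWORD ...
//   %1:agpr_32 = COPY %0
// =>
//   %0:agpr_32 = GLOBAL_LOAD_DWORD ...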
1985fe6060f1SDimitry Andric bool SIFoldOperands::tryFoldLoad(MachineInstr &MI) {
1986fe6060f1SDimitry Andric   assert(MI.mayLoad());
1987fe6060f1SDimitry Andric   if (!ST->hasGFX90AInsts() || MI.getNumExplicitDefs() != 1)
1988fe6060f1SDimitry Andric     return false;
1989fe6060f1SDimitry Andric 
1990fe6060f1SDimitry Andric   MachineOperand &Def = MI.getOperand(0);
1991fe6060f1SDimitry Andric   if (!Def.isDef())
1992fe6060f1SDimitry Andric     return false;
1993fe6060f1SDimitry Andric 
1994fe6060f1SDimitry Andric   Register DefReg = Def.getReg();
1995fe6060f1SDimitry Andric 
1996fe6060f1SDimitry Andric   if (DefReg.isPhysical() || !TRI->isVGPR(*MRI, DefReg))
1997fe6060f1SDimitry Andric     return false;
1998fe6060f1SDimitry Andric 
1999fe6060f1SDimitry Andric   SmallVector<const MachineInstr*, 8> Users;
2000fe6060f1SDimitry Andric   SmallVector<Register, 8> MoveRegs;
2001bdd1243dSDimitry Andric   for (const MachineInstr &I : MRI->use_nodbg_instructions(DefReg))
2002fe6060f1SDimitry Andric     Users.push_back(&I);
2003bdd1243dSDimitry Andric 
2004fe6060f1SDimitry Andric   if (Users.empty())
2005fe6060f1SDimitry Andric     return false;
2006fe6060f1SDimitry Andric 
2007fe6060f1SDimitry Andric   // Check that all uses are a copy to an AGPR or a REG_SEQUENCE producing an AGPR.
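  // Transitive chains also qualify, e.g. (illustrative):
  //   %1:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, ...
  //   %2:areg_64 = COPY %1
  // since the value ultimately lands in an AGPR.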
2008fe6060f1SDimitry Andric   while (!Users.empty()) {
2009fe6060f1SDimitry Andric     const MachineInstr *I = Users.pop_back_val();
2010fe6060f1SDimitry Andric     if (!I->isCopy() && !I->isRegSequence())
2011fe6060f1SDimitry Andric       return false;
2012fe6060f1SDimitry Andric     Register DstReg = I->getOperand(0).getReg();
2013bdd1243dSDimitry Andric     // Physical registers may have more than one defining instruction.
2014bdd1243dSDimitry Andric     if (DstReg.isPhysical())
2015bdd1243dSDimitry Andric       return false;
2016fe6060f1SDimitry Andric     if (TRI->isAGPR(*MRI, DstReg))
2017fe6060f1SDimitry Andric       continue;
2018fe6060f1SDimitry Andric     MoveRegs.push_back(DstReg);
2019bdd1243dSDimitry Andric     for (const MachineInstr &U : MRI->use_nodbg_instructions(DstReg))
2020fe6060f1SDimitry Andric       Users.push_back(&U);
2021fe6060f1SDimitry Andric   }
2022fe6060f1SDimitry Andric 
2023fe6060f1SDimitry Andric   const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
2024fe6060f1SDimitry Andric   MRI->setRegClass(DefReg, TRI->getEquivalentAGPRClass(RC));
2025fe6060f1SDimitry Andric   if (!TII->isOperandLegal(MI, 0, &Def)) {
2026fe6060f1SDimitry Andric     MRI->setRegClass(DefReg, RC);
2027fe6060f1SDimitry Andric     return false;
2028fe6060f1SDimitry Andric   }
2029fe6060f1SDimitry Andric 
2030fe6060f1SDimitry Andric   while (!MoveRegs.empty()) {
2031fe6060f1SDimitry Andric     Register Reg = MoveRegs.pop_back_val();
2032fe6060f1SDimitry Andric     MRI->setRegClass(Reg, TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg)));
2033fe6060f1SDimitry Andric   }
2034fe6060f1SDimitry Andric 
2035fe6060f1SDimitry Andric   LLVM_DEBUG(dbgs() << "Folded " << MI);
2036fe6060f1SDimitry Andric 
2037fe6060f1SDimitry Andric   return true;
2038fe6060f1SDimitry Andric }
2039fe6060f1SDimitry Andric 
204006c3fb27SDimitry Andric // tryFoldPhiAGPR will aggressively try to create AGPR PHIs.
204106c3fb27SDimitry Andric // For GFX90A and later, this is pretty much always a good thing, but for GFX908
204206c3fb27SDimitry Andric // there are cases where it can create a lot more AGPR-AGPR copies, which are
204306c3fb27SDimitry Andric // expensive on this architecture due to the lack of V_ACCVGPR_MOV.
204406c3fb27SDimitry Andric //
204506c3fb27SDimitry Andric // This function looks at all AGPR PHIs in a basic block and collects their
204606c3fb27SDimitry Andric // operands. Then, it checks for registers that are used more than once across
204706c3fb27SDimitry Andric // all PHIs and caches them in a VGPR. This prevents ExpandPostRAPseudo from
204806c3fb27SDimitry Andric // having to create one VGPR temporary per use, which can get very messy if
204906c3fb27SDimitry Andric // these PHIs come from a broken-up large PHI (e.g. 32 AGPR phis, one per vector
205006c3fb27SDimitry Andric // element).
205106c3fb27SDimitry Andric //
205206c3fb27SDimitry Andric // Example
205306c3fb27SDimitry Andric //      a:
205406c3fb27SDimitry Andric //        %in:agpr_256 = COPY %foo:vgpr_256
205506c3fb27SDimitry Andric //      c:
205606c3fb27SDimitry Andric //        %x:agpr_32 = ..
205706c3fb27SDimitry Andric //      b:
205806c3fb27SDimitry Andric //        %0:areg = PHI %in.sub0:agpr_32, %a, %x, %c
205906c3fb27SDimitry Andric //        %1:areg = PHI %in.sub0:agpr_32, %a, %y, %c
206006c3fb27SDimitry Andric //        %2:areg = PHI %in.sub0:agpr_32, %a, %z, %c
206106c3fb27SDimitry Andric //  =>
206206c3fb27SDimitry Andric //      a:
206306c3fb27SDimitry Andric //        %in:agpr_256 = COPY %foo:vgpr_256
206406c3fb27SDimitry Andric //        %tmp:vgpr_32 = V_ACCVGPR_READ_B32_e64 %in.sub0:agpr_32
206506c3fb27SDimitry Andric //        %tmp_agpr:agpr_32 = COPY %tmp
206606c3fb27SDimitry Andric //      c:
206706c3fb27SDimitry Andric //        %x:agpr_32 = ..
206806c3fb27SDimitry Andric //      b:
206906c3fb27SDimitry Andric //        %0:areg = PHI %tmp_agpr, %a, %x, %c
207006c3fb27SDimitry Andric //        %1:areg = PHI %tmp_agpr, %a, %y, %c
207106c3fb27SDimitry Andric //        %2:areg = PHI %tmp_agpr, %a, %z, %c
207206c3fb27SDimitry Andric bool SIFoldOperands::tryOptimizeAGPRPhis(MachineBasicBlock &MBB) {
207306c3fb27SDimitry Andric   // This is only really needed on GFX908 where AGPR-AGPR copies are
207406c3fb27SDimitry Andric   // unreasonably difficult.
207506c3fb27SDimitry Andric   if (ST->hasGFX90AInsts())
207606c3fb27SDimitry Andric     return false;
207706c3fb27SDimitry Andric 
207806c3fb27SDimitry Andric   // Look at all AGPR Phis and collect the register + subregister used.
207906c3fb27SDimitry Andric   DenseMap<std::pair<Register, unsigned>, std::vector<MachineOperand *>>
208006c3fb27SDimitry Andric       RegToMO;
208106c3fb27SDimitry Andric 
208206c3fb27SDimitry Andric   for (auto &MI : MBB) {
208306c3fb27SDimitry Andric     if (!MI.isPHI())
208406c3fb27SDimitry Andric       break;
208506c3fb27SDimitry Andric 
208606c3fb27SDimitry Andric     if (!TRI->isAGPR(*MRI, MI.getOperand(0).getReg()))
208706c3fb27SDimitry Andric       continue;
208806c3fb27SDimitry Andric 
208906c3fb27SDimitry Andric     for (unsigned K = 1; K < MI.getNumOperands(); K += 2) {
209006c3fb27SDimitry Andric       MachineOperand &PhiMO = MI.getOperand(K);
209106c3fb27SDimitry Andric       RegToMO[{PhiMO.getReg(), PhiMO.getSubReg()}].push_back(&PhiMO);
209206c3fb27SDimitry Andric     }
209306c3fb27SDimitry Andric   }
209406c3fb27SDimitry Andric 
209506c3fb27SDimitry Andric   // For each (Reg, SubReg) pair that is used more than once, cache the value in
209606c3fb27SDimitry Andric   // a VGPR.
209706c3fb27SDimitry Andric   bool Changed = false;
209806c3fb27SDimitry Andric   for (const auto &[Entry, MOs] : RegToMO) {
209906c3fb27SDimitry Andric     if (MOs.size() == 1)
210006c3fb27SDimitry Andric       continue;
210106c3fb27SDimitry Andric 
210206c3fb27SDimitry Andric     const auto [Reg, SubReg] = Entry;
210306c3fb27SDimitry Andric     MachineInstr *Def = MRI->getVRegDef(Reg);
210406c3fb27SDimitry Andric     MachineBasicBlock *DefMBB = Def->getParent();
210506c3fb27SDimitry Andric 
210606c3fb27SDimitry Andric     // Create a copy in a VGPR using V_ACCVGPR_READ_B32_e64 so it's not folded
210706c3fb27SDimitry Andric     // out.
210806c3fb27SDimitry Andric     const TargetRegisterClass *ARC = getRegOpRC(*MRI, *TRI, *MOs.front());
210906c3fb27SDimitry Andric     Register TempVGPR =
211006c3fb27SDimitry Andric         MRI->createVirtualRegister(TRI->getEquivalentVGPRClass(ARC));
211106c3fb27SDimitry Andric     MachineInstr *VGPRCopy =
211206c3fb27SDimitry Andric         BuildMI(*DefMBB, ++Def->getIterator(), Def->getDebugLoc(),
211306c3fb27SDimitry Andric                 TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TempVGPR)
211406c3fb27SDimitry Andric             .addReg(Reg, /* flags */ 0, SubReg);
211506c3fb27SDimitry Andric 
211606c3fb27SDimitry Andric     // Copy back to an AGPR and use that instead of the AGPR subreg in all MOs.
211706c3fb27SDimitry Andric     Register TempAGPR = MRI->createVirtualRegister(ARC);
211806c3fb27SDimitry Andric     BuildMI(*DefMBB, ++VGPRCopy->getIterator(), Def->getDebugLoc(),
211906c3fb27SDimitry Andric             TII->get(AMDGPU::COPY), TempAGPR)
212006c3fb27SDimitry Andric         .addReg(TempVGPR);
212106c3fb27SDimitry Andric 
212206c3fb27SDimitry Andric     LLVM_DEBUG(dbgs() << "Caching AGPR into VGPR: " << *VGPRCopy);
212306c3fb27SDimitry Andric     for (MachineOperand *MO : MOs) {
212406c3fb27SDimitry Andric       MO->setReg(TempAGPR);
212506c3fb27SDimitry Andric       MO->setSubReg(AMDGPU::NoSubRegister);
212606c3fb27SDimitry Andric       LLVM_DEBUG(dbgs() << "  Changed PHI Operand: " << *MO << "\n");
212706c3fb27SDimitry Andric     }
212806c3fb27SDimitry Andric 
212906c3fb27SDimitry Andric     Changed = true;
213006c3fb27SDimitry Andric   }
213106c3fb27SDimitry Andric 
213206c3fb27SDimitry Andric   return Changed;
213306c3fb27SDimitry Andric }
213406c3fb27SDimitry Andric 
21350b57cec5SDimitry Andric bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
21360b57cec5SDimitry Andric   if (skipFunction(MF.getFunction()))
21370b57cec5SDimitry Andric     return false;
21380b57cec5SDimitry Andric 
21390b57cec5SDimitry Andric   MRI = &MF.getRegInfo();
21400b57cec5SDimitry Andric   ST = &MF.getSubtarget<GCNSubtarget>();
21410b57cec5SDimitry Andric   TII = ST->getInstrInfo();
21420b57cec5SDimitry Andric   TRI = &TII->getRegisterInfo();
21430b57cec5SDimitry Andric   MFI = MF.getInfo<SIMachineFunctionInfo>();
21440b57cec5SDimitry Andric 
21450b57cec5SDimitry Andric   // omod is ignored by hardware if IEEE bit is enabled. omod also does not
21460b57cec5SDimitry Andric   // correctly handle signed zeros.
21470b57cec5SDimitry Andric   //
21480b57cec5SDimitry Andric   // FIXME: Also need to check strictfp
21490b57cec5SDimitry Andric   bool IsIEEEMode = MFI->getMode().IEEE;
21500b57cec5SDimitry Andric   bool HasNSZ = MFI->hasNoSignedZerosFPMath();
21510b57cec5SDimitry Andric 
215281ad6265SDimitry Andric   bool Changed = false;
21530b57cec5SDimitry Andric   for (MachineBasicBlock *MBB : depth_first(&MF)) {
21548bcb0991SDimitry Andric     MachineOperand *CurrentKnownM0Val = nullptr;
2155fe6060f1SDimitry Andric     for (auto &MI : make_early_inc_range(*MBB)) {
215681ad6265SDimitry Andric       Changed |= tryFoldCndMask(MI);
21570b57cec5SDimitry Andric 
215881ad6265SDimitry Andric       if (tryFoldZeroHighBits(MI)) {
215981ad6265SDimitry Andric         Changed = true;
2160fe6060f1SDimitry Andric         continue;
216181ad6265SDimitry Andric       }
2162fe6060f1SDimitry Andric 
216381ad6265SDimitry Andric       if (MI.isRegSequence() && tryFoldRegSequence(MI)) {
216481ad6265SDimitry Andric         Changed = true;
2165fe6060f1SDimitry Andric         continue;
216681ad6265SDimitry Andric       }
2167fe6060f1SDimitry Andric 
216806c3fb27SDimitry Andric       if (MI.isPHI() && tryFoldPhiAGPR(MI)) {
216981ad6265SDimitry Andric         Changed = true;
2170fe6060f1SDimitry Andric         continue;
217181ad6265SDimitry Andric       }
2172fe6060f1SDimitry Andric 
217381ad6265SDimitry Andric       if (MI.mayLoad() && tryFoldLoad(MI)) {
217481ad6265SDimitry Andric         Changed = true;
2175fe6060f1SDimitry Andric         continue;
217681ad6265SDimitry Andric       }
21770b57cec5SDimitry Andric 
2178bdd1243dSDimitry Andric       if (TII->isFoldableCopy(MI)) {
2179bdd1243dSDimitry Andric         Changed |= tryFoldFoldableCopy(MI, CurrentKnownM0Val);
2180bdd1243dSDimitry Andric         continue;
2181bdd1243dSDimitry Andric       }
2182bdd1243dSDimitry Andric 
2183480093f4SDimitry Andric       // Saw an unknown clobber of m0, so we no longer know what it is.
2184480093f4SDimitry Andric       if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
2185480093f4SDimitry Andric         CurrentKnownM0Val = nullptr;
2186480093f4SDimitry Andric 
21870b57cec5SDimitry Andric       // TODO: Omod might be OK if there is NSZ only on the source
21880b57cec5SDimitry Andric       // instruction, and not the omod multiply.
21890b57cec5SDimitry Andric       if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
21900b57cec5SDimitry Andric           !tryFoldOMod(MI))
219181ad6265SDimitry Andric         Changed |= tryFoldClamp(MI);
21920b57cec5SDimitry Andric     }
219306c3fb27SDimitry Andric 
219406c3fb27SDimitry Andric     Changed |= tryOptimizeAGPRPhis(*MBB);
21950b57cec5SDimitry Andric   }
2196bdd1243dSDimitry Andric 
219781ad6265SDimitry Andric   return Changed;
21980b57cec5SDimitry Andric }