//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;
Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

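// Illustrative GMIR for the COPY insertion performed below (a sketch; the
// register class and vreg numbers are hypothetical). When the operand's vreg
// cannot be constrained in place, a use operand is rewritten as
//   %1:gpr32 = COPY %0
//   ... = INST %1
// while a def operand keeps the original vreg live via a trailing copy:
//   %1:gpr32 = INST ...
//   %0 = COPY %1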
Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible,
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here may not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
                                  RegMO);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  return !MRI.getRegClassOrRegBank(DstReg) ||
         MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;
  // LIFETIME markers should be preserved even if they seem dead.
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END)
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error && TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<APInt> llvm::getConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg =
      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<int64_t> llvm::getConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI) {
  Optional<APInt> Val = getConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return None;
}

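// Illustrative GMIR (a sketch; vreg numbers are hypothetical):
//   %0:_(s32) = G_CONSTANT i32 42
//   %1:_(s64) = G_SEXT %0(s32)
// With LookThroughInstrs set, a query on %1 walks the G_SEXT, folds the
// extension into the constant, and returns {APInt(64, 42), /*VReg=*/%0}.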
Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool HandleFConstant, bool LookThroughAnyExt) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
    return Opcode == TargetOpcode::G_CONSTANT ||
           (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
  };
  auto GetImmediateValue = [HandleFConstant,
                            &MRI](const MachineInstr &MI) -> Optional<APInt> {
    const MachineOperand &CstVal = MI.getOperand(1);
    if (!CstVal.isImm() && !CstVal.isCImm() &&
        (!HandleFConstant || !CstVal.isFPImm()))
      return None;
    if (!CstVal.isFPImm()) {
      unsigned BitWidth =
          MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
      APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
                                 : CstVal.getCImm()->getValue();
      assert(Val.getBitWidth() == BitWidth &&
             "Value bitwidth doesn't match definition type");
      return Val;
    }
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  };
  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return None;
      LLVM_FALLTHROUGH;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI->getOpcode()))
    return None;

  Optional<APInt> MaybeVal = GetImmediateValue(*MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}

const ConstantInt *llvm::getConstantIntVRegVal(Register VReg,
                                               const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (MI->getOpcode() != TargetOpcode::G_CONSTANT)
    return nullptr;
  return MI->getOperand(1).getCImm();
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (MI->getOpcode() != TargetOpcode::G_FCONSTANT)
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

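// Illustrative GMIR (a sketch; vreg numbers are hypothetical):
//   %0:_(s32) = G_ADD %a, %b
//   %1:_(s32) = COPY %0
//   %2:_(s32) = COPY %1
// A query on %2 walks the COPY chain and returns {MI = the G_ADD, Reg = %0}.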
Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (!MaybeOp1Cst)
    return None;

  const APInt &C1 = *MaybeOp1Cst;
  const APInt &C2 = *MaybeOp2Cst;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  }

  return None;
}

Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                            const Register Op2,
                                            const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return None;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return None;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling NaNs and instead follow libm's
    // fmin/fmax, and currently there isn't a nice wrapper in APFloat for the
    // version with correct sNaN handling.
    break;
  default:
    break;
  }

  return None;
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-NaN, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet sNaNs. For now, just handle the ones inserted
    // during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        LLT RegTy) {
  DebugLoc DL; // FIXME: Is no location the right choice?
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy were added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live-in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
      .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return None;
}

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
      if (!isKnownToBeAPowerOfTwo(MI.getOperand(I).getReg(), MRI, KB))
        return false;
    }

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants since we would need to know if the number of
    // leading zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
      auto Const = getConstantVRegVal(MI.getOperand(I).getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
  unsigned Mul = OrigSize * TargetSize;
  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
  return Mul / GCDSize;
}

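// Worked examples for getLCMType (a sketch; assumes 64-bit pointers where p0
// appears):
//   getLCMType(<3 x s32>, <2 x s32>) -> <6 x s32> (LCM of the element counts)
//   getLCMType(<2 x s32>, s32)       -> <2 x s32> (already a multiple)
//   getLCMType(s32, s64)             -> s64
//   getLCMType(p0, s32)              -> p0, since LCM(64, 32) == 64 and
//                                       pointer types are preserved.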
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
                                            TargetTy.getNumElements());
        // Prefer the original element type.
        int Mul = OrigTy.getNumElements() * TargetTy.getNumElements();
        return LLT::vector(Mul / GCDElts, OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}

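// Worked examples for getGCDType (a sketch):
//   getGCDType(<4 x s32>, <2 x s32>) -> <2 x s32> (GCD of the element counts)
//   getGCDType(<2 x s64>, s32)       -> s32 (GCD(128, 32) is smaller than the
//                                      element size, so fall back to a scalar)
//   getGCDType(s64, s32)             -> s32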
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
                                        TargetTy.getNumElements());
        return LLT::scalarOrVector(GCD, OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}

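// Illustrative shuffle masks for getSplatIndex (a sketch):
//   <1, 1, 1, 1>  -> 1
//   <2, -1, 2, 2> -> 2    (undef elements are ignored)
//   <-1, -1>      -> 0    (an all-undef mask is treated as a splat of 0)
//   <0, 1, 0, 0>  -> None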
Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 to give callers a better chance to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

// TODO: Handle mixed undef elements.
static bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                       const MachineRegisterInfo &MRI,
                                       int64_t SplatValue) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
      return false;
  }

  return true;
}

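// Illustrative GMIR (a sketch; vreg numbers are hypothetical):
//   %c:_(s32) = G_CONSTANT i32 5
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %c, %c, %c, %c
// A query on the G_BUILD_VECTOR yields 5; mixing different constants, or any
// non-constant element, yields None.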
Optional<int64_t>
llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  if (!isBuildVectorOp(MI.getOpcode()))
    return None;

  const unsigned NumOps = MI.getNumOperands();
  Optional<int64_t> Scalar;
  for (unsigned I = 1; I != NumOps; ++I) {
    Register Element = MI.getOperand(I).getReg();
    int64_t ElementValue;
    if (!mi_match(Element, MRI, m_ICst(ElementValue)))
      return None;
    if (!Scalar)
      Scalar = ElementValue;
    else if (*Scalar != ElementValue)
      return None;
  }

  return Scalar;
}

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, 0);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI) {
  return isBuildVectorConstantSplat(MI, MRI, -1);
}

Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
                                             const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return None;
  if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return None;
  return RegOrConstant(Reg);
}

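// Illustrative use of matchUnaryPredicate (a sketch; the lambda is
// hypothetical): check that every constant element of Reg is a power of two.
// With AllowUndefs set, undef elements reach the predicate as a null Constant.
//   bool AllPow2 = matchUnaryPredicate(MRI, Reg, [](const Constant *C) {
//     auto *CI = dyn_cast_or_null<ConstantInt>(C);
//     return CI && CI->getValue().isPowerOf2();
//   });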
bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {

  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);

  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());

  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;

  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }

  return true;
}

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  const auto &F = MBB.getParent()->getFunction();
  return F.hasOptSize() || F.hasMinSize() ||
         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
}

unsigned llvm::getIntrinsicID(const MachineInstr &MI) {
#ifndef NDEBUG
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_INTRINSIC ||
         Opc == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
#endif
  return MI.getOperand(MI.getNumExplicitDefs()).getIntrinsicID();
}