//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
using namespace MIPatternMatch;

Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}
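// Illustrative sketch (not upstream code): if the operand's current class is
// incompatible, a fresh vreg of the requested class is created and a COPY is
// inserted. Register names and the gpr32 class below are hypothetical.
//
//   Before, with %0 unconstrainable to gpr32 and used by InsertPt:
//     InsertPt ... %0 ...
//   After:
//     %1:gpr32 = COPY %0
//     InsertPt ... %1 ...
//   For a def operand, the COPY is placed after InsertPt instead:
//     InsertPt defines %1:gpr32
//     %0 = COPY %1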

Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: If it's a use, we can
  // skip constraining as the instruction defining the register would constrain
  // it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here may not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
                                  RegMO);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}
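// A minimal usage sketch (not upstream code): instruction selectors
// typically call this right after emitting a target instruction. MyTarget
// and ADDWrr are hypothetical placeholders.
//
//   MachineInstr &NewI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
//                                 TII.get(MyTarget::ADDWrr), DstReg)
//                             .addReg(LHSReg)
//                             .addReg(RHSReg);
//   I.eraseFromParent();
//   return constrainSelectedInstRegOperands(NewI, TII, TRI, RBI);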

bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  return !MRI.getRegClassOrRegBank(DstReg) ||
         MRI.getRegClassOrRegBank(DstReg) == MRI.getRegClassOrRegBank(SrcReg);
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;
  // LIFETIME markers should be preserved even if they seem dead.
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END)
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}
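// For instance (illustrative MIR), an arithmetic instruction whose result
// has no non-debug uses is trivially dead, while a store never is:
//
//   %2:_(s32) = G_ADD %0, %1   ; dead iff %2 has no non-debug uses
//   G_STORE %0(s32), %3(p0)    ; has a side effect, so never trivially dead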

static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error && TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(Twine(R.getMsg()));
  else
    MORE.emit(R);
}

void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<APInt> llvm::getIConstantVRegVal(Register VReg,
                                          const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg = getIConstantVRegValWithLookThrough(
      VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI) {
  Optional<APInt> Val = getIConstantVRegVal(VReg, MRI);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return None;
}
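// Illustrative example: for a vreg defined as
//   %0:_(s8) = G_CONSTANT i8 -1
// getIConstantVRegVal(%0, MRI) returns an 8-bit all-ones APInt, and
// getIConstantVRegSExtVal(%0, MRI) returns int64_t(-1). Constants wider
// than 64 bits yield None from the sign-extending variant.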

namespace {

typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
typedef std::function<Optional<APInt>(const MachineInstr *MI)> GetAPCstFn;

Optional<ValueAndVReg> getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
    GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
    bool LookThroughAnyExt = false) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;

  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return None;
      LLVM_FALLTHROUGH;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return None;

  Optional<APInt> MaybeVal = getAPCstValue(MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}

bool isIConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
}

bool isFConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
}

bool isAnyConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}

Optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  return None;
}

Optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  if (CstVal.isFPImm())
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  return None;
}

} // end anonymous namespace

Optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
                                           getCImmAsAPInt, LookThroughInstrs);
}
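// A look-through sketch with hypothetical vregs: given
//   %0:_(s32) = G_CONSTANT i32 -1
//   %1:_(s64) = G_SEXT %0(s32)
// getIConstantVRegValWithLookThrough(%1, MRI) walks the G_SEXT back to the
// G_CONSTANT and re-applies the extension to the constant, returning
// ValueAndVReg{64-bit all-ones APInt, %0}.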

Optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough(
      VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
      LookThroughAnyExt);
}

Optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  auto Reg = getConstantVRegValWithLookThrough(
      VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
  if (!Reg)
    return None;
  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
                        Reg->VReg};
}

const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

Optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return None;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}

MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}

Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}

MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}
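// Illustrative use (names hypothetical): skip any COPYs or optimization
// hints between Reg and its def, then check the def's opcode in one step.
//
//   if (MachineInstr *FCst =
//           getOpcodeDef(TargetOpcode::G_FCONSTANT, Reg, MRI)) {
//     const APFloat &V = FCst->getOperand(1).getFPImm()->getValueAPF();
//     // Reg is (possibly a copy of) a floating-point constant with value V.
//   }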

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}
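// For example, getAPFloatFromSize(1.0, 16) converts the double through
// APFloat::IEEEhalf(), so callers get a correctly rounded half-precision
// constant; any size other than 16, 32, or 64 hits llvm_unreachable.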

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                        const Register Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
  if (!MaybeOp2Cst)
    return None;

  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
  if (!MaybeOp1Cst)
    return None;

  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  }

  return None;
}
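// Worked example (hypothetical vregs): with
//   %0:_(s32) = G_CONSTANT i32 7
//   %1:_(s32) = G_CONSTANT i32 2
// ConstantFoldBinOp(TargetOpcode::G_SDIV, %0, %1, MRI) yields APInt(32, 3).
// A zero divisor for G_UDIV/G_SDIV/G_UREM/G_SREM falls through to None
// instead of folding an undefined division.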

Optional<APFloat> llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                            const Register Op2,
                                            const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return None;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return None;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct snan handling.
    break;
  default:
    break;
  }

  return None;
}

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-NaN, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = MPO.V.dyn_cast<const PseudoSourceValue *>();
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = MPO.V.dyn_cast<const Value *>()) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}

Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        LLT RegTy) {
  DebugLoc DL; // FIXME: Is no location the right choice?
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB &&
             "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy were added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live-in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
      .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}
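// A call-lowering usage sketch; treating AArch64's X8 (the sret register)
// as the incoming physical argument register is just an illustration:
//
//   Register SRet = getFunctionLiveInPhysReg(
//       MF, TII, AArch64::X8, AArch64::GPR64RegClass, LLT::pointer(0, 64));
//   // SRet is a vreg defined by a COPY from X8 in the entry block; the
//   // COPY is re-created if an earlier one was deleted as dead.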

Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return None;
}

Optional<APFloat> llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                               Register Src,
                                               const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                            APFloat::rmNearestTiesToEven);
    return DstVal;
  }
  return None;
}

Optional<SmallVector<unsigned>>
llvm::ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
  LLT Ty = MRI.getType(Src);
  SmallVector<unsigned> FoldedCTLZs;
  auto tryFoldScalar = [&](Register R) -> Optional<unsigned> {
    auto MaybeCst = getIConstantVRegVal(R, MRI);
    if (!MaybeCst)
      return None;
    return MaybeCst->countLeadingZeros();
  };
  if (Ty.isVector()) {
    // Try to constant fold each element.
    auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);
    if (!BV)
      return None;
    for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
      if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
        FoldedCTLZs.emplace_back(*MaybeFold);
        continue;
      }
      return None;
    }
    return FoldedCTLZs;
  }
  if (auto MaybeCst = tryFoldScalar(Src)) {
    FoldedCTLZs.emplace_back(*MaybeCst);
    return FoldedCTLZs;
  }
  return None;
}
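// Example (illustrative): for
//   %0:_(s32) = G_CONSTANT i32 16
// ConstantFoldCTLZ(%0, MRI) returns {27}, since 16 has 27 leading zeros in
// 32 bits. For a vector, the fold succeeds only when every G_BUILD_VECTOR
// source is a constant, producing one count per element.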

bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  Optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
      if (!isKnownToBeAPowerOfTwo(MI.getOperand(I).getReg(), MRI, KB))
        return false;
    }

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants since we would need to know if the number of
    // leading zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
      auto Const = getIConstantVRegVal(MI.getOperand(I).getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

static unsigned getLCMSize(unsigned OrigSize, unsigned TargetSize) {
  unsigned Mul = OrigSize * TargetSize;
  unsigned GCDSize = greatestCommonDivisor(OrigSize, TargetSize);
  return Mul / GCDSize;
}

LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
                                            TargetTy.getNumElements());
        // Prefer the original element type.
        ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
        return LLT::vector(Mul.divideCoefficientBy(GCDElts),
                           OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = getLCMSize(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}
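// Worked examples of the LCM type (sizes in bits):
//   getLCMType(<2 x s32>, <3 x s32>) == <6 x s32>  // LCM over element counts
//   getLCMType(s32, s64)             == s64        // LCM(32, 64) == 64
//   getLCMType(<2 x s16>, s64)       == <4 x s16>  // 64 bits of s16 elements
//   getLCMType(p0, s32)              == p0         // a 64-bit p0 is preserved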

LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = greatestCommonDivisor(OrigTy.getNumElements(),
                                        TargetTy.getNumElements());
        return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = greatestCommonDivisor(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}
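// Worked examples of the GCD type (sizes in bits):
//   getGCDType(<4 x s32>, <2 x s32>) == <2 x s32>  // GCD of element counts
//   getGCDType(s64, s32)             == s32        // GCD(64, 32) == 32
//   getGCDType(<4 x s16>, s32)       == <2 x s16>  // 32 bits of s16 elements
//   getGCDType(<2 x s64>, s32)       == s32        // falls back to a scalar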

Optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return None;

  return SplatValue;
}
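// For instance (illustrative): a shuffle mask of [1, -1, 1, 1] yields splat
// index 1 (undef lanes are ignored), an all-undef mask yields 0 by the
// convention above, and [0, 1, 0, 0] yields None.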

static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}

namespace {

Optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
                                           const MachineRegisterInfo &MRI,
                                           bool AllowUndef) {
  MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
  if (!MI)
    return None;

  if (!isBuildVectorOp(MI->getOpcode()))
    return None;

  Optional<ValueAndVReg> SplatValAndReg = None;
  for (MachineOperand &Op : MI->uses()) {
    Register Element = Op.getReg();
    auto ElementValAndReg =
        getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);

    // If AllowUndef, treat undef as a value that will result in a constant
    // splat.
    if (!ElementValAndReg) {
      if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
        continue;
      return None;
    }

    // Record the splat value.
    if (!SplatValAndReg)
      SplatValAndReg = ElementValAndReg;

    // A different constant than the one already recorded means this is not a
    // constant splat.
    if (SplatValAndReg->Value != ElementValAndReg->Value)
      return None;
  }

  return SplatValAndReg;
}

bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(MI.getOperand(0).getReg(), MRI, AllowUndef))
    return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
  return false;
}

} // end anonymous namespace

Optional<int64_t>
llvm::getBuildVectorConstantSplat(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(MI.getOperand(0).getReg(), MRI, false))
    return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
  return None;
}

Optional<FPValueAndVReg> llvm::getFConstantSplat(Register VReg,
                                                 const MachineRegisterInfo &MRI,
                                                 bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
    return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
  return None;
}

bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI,
                                 bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
}

bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
}

Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
                                             const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return None;
  if (auto Splat = getBuildVectorConstantSplat(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(make_range(MI.operands_begin() + 2, MI.operands_end()),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return None;
  return RegOrConstant(Reg);
}

bool llvm::isConstantOrConstantVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return true;
  GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
  if (!BV)
    return false;
  for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
    if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
        getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
      continue;
    return false;
  }
  return true;
}

Optional<APInt>
llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return C->Value;
  auto MaybeCst = getBuildVectorConstantSplat(MI, MRI);
  if (!MaybeCst)
    return None;
  const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();
  return APInt(ScalarSize, *MaybeCst, true);
}

bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {

  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);

  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());

  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;

  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }

  return true;
}
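// Usage sketch (not upstream code): the predicate sees each scalar
// G_CONSTANT, or nullptr for an undef element when AllowUndefs is set.
//
//   bool AllNonZero = matchUnaryPredicate(
//       MRI, Reg,
//       [](const Constant *C) {
//         auto *CI = dyn_cast_or_null<ConstantInt>(C);
//         return CI && !CI->isZero();
//       },
//       /*AllowUndefs=*/false);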

bool llvm::isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                          bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
    return Val == 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}

bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  const auto &F = MBB.getParent()->getFunction();
  return F.hasOptSize() || F.hasMinSize() ||
         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
}

/// These artifacts generally don't have any debug users because they don't
/// directly originate from IR instructions, but instead usually from
/// legalization. Avoiding checking for debug users improves compile time.
/// Note that truncates or extends aren't included because they have IR
/// counterparts which can have debug users after translation.
static bool shouldSkipDbgValueFor(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_UNMERGE_VALUES:
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_EXTRACT:
  case TargetOpcode::G_INSERT:
    return true;
  default:
    return false;
  }
}

void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                            LostDebugLocObserver *LocObserver,
                            SmallInstListTy &DeadInstChain) {
  for (MachineOperand &Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual())
      DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
  }
  LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
  DeadInstChain.remove(&MI);
  if (shouldSkipDbgValueFor(MI))
    MI.eraseFromParent();
  else
    MI.eraseFromParentAndMarkDBGValuesForRemoval();
  if (LocObserver)
    LocObserver->checkpoint(false);
}

void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
                       MachineRegisterInfo &MRI,
                       LostDebugLocObserver *LocObserver) {
  SmallInstListTy DeadInstChain;
  for (MachineInstr *MI : DeadInstrs)
    saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);

  while (!DeadInstChain.empty()) {
    MachineInstr *Inst = DeadInstChain.pop_back_val();
    if (!isTriviallyDead(*Inst, MRI))
      continue;
    saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
  }
}
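// Sketch of the cascade (hypothetical MIR): erasing the G_STORE below also
// queues the defs of its use operands, so the then-dead G_CONSTANT is
// removed by a later iteration of the DeadInstChain worklist.
//
//   %0:_(s32) = G_CONSTANT i32 5   ; becomes dead once the store is erased
//   G_STORE %0(s32), %1(p0)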

void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver) {
  return eraseInstrs({&MI}, MRI, LocObserver);
}