//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API of helper functions used throughout the
/// GlobalISel pipeline.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
#define LLVM_CODEGEN_GLOBALISEL_UTILS_H

#include "GISelWorkList.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include <cstdint>

namespace llvm {

class AnalysisUsage;
class LostDebugLocObserver;
class MachineBasicBlock;
class BlockFrequencyInfo;
class GISelKnownBits;
class MachineFunction;
class MachineInstr;
class MachineIRBuilder;
class MachineOperand;
class MachineOptimizationRemarkEmitter;
class MachineOptimizationRemarkMissed;
struct MachinePointerInfo;
class MachineRegisterInfo;
class MCInstrDesc;
class ProfileSummaryInfo;
class RegisterBankInfo;
class TargetInstrInfo;
class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetRegisterClass;
class ConstantFP;
class APFloat;

// Convenience macros for dealing with vector reduction opcodes.
#define GISEL_VECREDUCE_CASES_ALL                                              \
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:                                     \
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:                                     \
  case TargetOpcode::G_VECREDUCE_FADD:                                         \
  case TargetOpcode::G_VECREDUCE_FMUL:                                         \
  case TargetOpcode::G_VECREDUCE_FMAX:                                         \
  case TargetOpcode::G_VECREDUCE_FMIN:                                         \
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:                                     \
  case TargetOpcode::G_VECREDUCE_FMINIMUM:                                     \
  case TargetOpcode::G_VECREDUCE_ADD:                                          \
  case TargetOpcode::G_VECREDUCE_MUL:                                          \
  case TargetOpcode::G_VECREDUCE_AND:                                          \
  case TargetOpcode::G_VECREDUCE_OR:                                           \
  case TargetOpcode::G_VECREDUCE_XOR:                                          \
  case TargetOpcode::G_VECREDUCE_SMAX:                                         \
  case TargetOpcode::G_VECREDUCE_SMIN:                                         \
  case TargetOpcode::G_VECREDUCE_UMAX:                                         \
  case TargetOpcode::G_VECREDUCE_UMIN:

#define GISEL_VECREDUCE_CASES_NONSEQ                                           \
  case TargetOpcode::G_VECREDUCE_FADD:                                         \
  case TargetOpcode::G_VECREDUCE_FMUL:                                         \
  case TargetOpcode::G_VECREDUCE_FMAX:                                         \
  case TargetOpcode::G_VECREDUCE_FMIN:                                         \
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:                                     \
  case TargetOpcode::G_VECREDUCE_FMINIMUM:                                     \
  case TargetOpcode::G_VECREDUCE_ADD:                                          \
  case TargetOpcode::G_VECREDUCE_MUL:                                          \
  case TargetOpcode::G_VECREDUCE_AND:                                          \
  case TargetOpcode::G_VECREDUCE_OR:                                           \
  case TargetOpcode::G_VECREDUCE_XOR:                                          \
  case TargetOpcode::G_VECREDUCE_SMAX:                                         \
  case TargetOpcode::G_VECREDUCE_SMIN:                                         \
  case TargetOpcode::G_VECREDUCE_UMAX:                                         \
  case TargetOpcode::G_VECREDUCE_UMIN:

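// For example (illustrative), the macros expand to a run of case labels, so
// they can be dropped directly into a switch over an opcode:
//
//   switch (MI.getOpcode()) {
//   GISEL_VECREDUCE_CASES_ALL
//     return true; // MI is one of the vector reduction instructions.
//   default:
//     return false;
//   }
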
/// Try to constrain Reg to the specified register class. If this fails,
/// create a new virtual register in the correct class.
///
/// \return The virtual register constrained to the right register class.
Register constrainRegToClass(MachineRegisterInfo &MRI,
                             const TargetInstrInfo &TII,
                             const RegisterBankInfo &RBI, Register Reg,
                             const TargetRegisterClass &RegClass);

/// Constrain the Register operand OpIdx, so that it is now constrained to the
/// TargetRegisterClass passed as an argument (RegClass).
/// If this fails, create a new virtual register in the correct class and insert
/// a COPY before \p InsertPt if it is a use or after if it is a definition.
/// In both cases, the function also updates the register of \p RegMO. The
/// debug location of \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt,
                                  const TargetRegisterClass &RegClass,
                                  MachineOperand &RegMO);

/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
/// MCInstrDesc \p II. If this fails, create a new virtual register in the
/// correct class and insert a COPY before \p InsertPt if it is a use or after
/// if it is a definition. In both cases, the function also updates the register
/// of \p RegMO.
/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
/// with RegClass obtained from the MCInstrDesc. The debug location of \p
/// InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt, const MCInstrDesc &II,
                                  MachineOperand &RegMO, unsigned OpIdx);

/// Mutate the newly-selected instruction \p I to constrain its (possibly
/// generic) virtual register operands to the instruction's register class.
/// This could involve inserting COPYs before (for uses) or after (for defs).
/// This requires the number of operands to match the instruction description.
/// \returns whether operand regclass constraining succeeded.
///
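/// A minimal usage sketch (illustrative; \p I, TII, TRI and RBI are assumed
/// to come from the surrounding InstructionSelector, and TARGET_OPC stands in
/// for a real target opcode):
/// \code
///   I.setDesc(TII.get(TARGET_OPC));
///   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
/// \endcode
///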
// FIXME: Not all instructions have the same number of operands. We should
// probably expose a constrain helper per operand and let the target selector
// constrain individual registers, like fast-isel.
bool constrainSelectedInstRegOperands(MachineInstr &I,
                                      const TargetInstrInfo &TII,
                                      const TargetRegisterInfo &TRI,
                                      const RegisterBankInfo &RBI);

/// Check if DstReg can be replaced with SrcReg depending on the register
/// constraints.
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);

/// Check whether an instruction \p MI is dead: it only defines dead virtual
/// registers, and doesn't have other side effects.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);

/// Report an ISel error as a missed optimization remark to the LLVMContext's
/// diagnostic stream.  Set the FailedISel MachineFunction property.
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        const char *PassName, StringRef Msg,
                        const MachineInstr &MI);

/// Report an ISel warning as a missed optimization remark to the LLVMContext's
/// diagnostic stream.
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
std::optional<APInt> getIConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI);

/// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
/// return that value.
std::optional<int64_t> getIConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI);

/// Simple struct used to hold a constant integer value and a virtual
/// register.
struct ValueAndVReg {
  APInt Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_CONSTANT, returns its APInt value and def register.
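///
/// For example (illustrative; ShiftAmtReg is a hypothetical register holding
/// a shift amount):
/// \code
///   auto ValAndVReg = getIConstantVRegValWithLookThrough(ShiftAmtReg, MRI);
///   bool IsPow2Shift = ValAndVReg && ValAndVReg->Value.isPowerOf2();
/// \endcode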
std::optional<ValueAndVReg>
getIConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_CONSTANT or G_FCONSTANT, returns its value as APInt and def
/// register.
std::optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI,
    bool LookThroughInstrs = true, bool LookThroughAnyExt = false);

struct FPValueAndVReg {
  APFloat Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_FCONSTANT, returns its APFloat value and def register.
std::optional<FPValueAndVReg>
getFConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

const ConstantFP* getConstantFPVRegVal(Register VReg,
                                       const MachineRegisterInfo &MRI);

/// See if Reg is defined by a single def instruction that is
/// Opcode. Also try to do trivial folding if it's a COPY with
/// same types. Returns null otherwise.
MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                           const MachineRegisterInfo &MRI);

/// Simple struct used to hold a Register value and the instruction which
/// defines it.
struct DefinitionAndSourceRegister {
  MachineInstr *MI;
  Register Reg;
};

/// Find the def instruction for \p Reg, and the underlying source Register,
/// folding away any copies.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
std::optional<DefinitionAndSourceRegister>
getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Find the def instruction for \p Reg, folding away any trivial copies. May
/// return nullptr if \p Reg is not a generic virtual register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
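///
/// For example (illustrative; UseReg is a hypothetical register operand of an
/// instruction being combined):
/// \code
///   MachineInstr *Def = getDefIgnoringCopies(UseReg, MRI);
///   bool FedByTrunc = Def && Def->getOpcode() == TargetOpcode::G_TRUNC;
/// \endcode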
MachineInstr *getDefIgnoringCopies(Register Reg,
                                   const MachineRegisterInfo &MRI);

/// Find the source register for \p Reg, folding away any trivial copies. It
/// will be an output register of the instruction that getDefIgnoringCopies
/// returns. May return an invalid register if \p Reg is not a generic virtual
/// register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Helper function to split a wide generic register into bitwise blocks with
/// the given Type (which implies the number of blocks needed). The generic
/// registers created are appended to Ops, starting at bit 0 of Reg.
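///
/// For example (illustrative; Src, MIRBuilder and MRI are assumed to come
/// from the surrounding legalizer code), splitting a 64-bit value into two
/// 32-bit pieces:
/// \code
///   SmallVector<Register, 2> Parts;
///   extractParts(Src, LLT::scalar(32), 2, Parts, MIRBuilder, MRI);
///   // Parts[0] holds bits [0, 32) and Parts[1] holds bits [32, 64) of Src.
/// \endcode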
void extractParts(Register Reg, LLT Ty, int NumParts,
                  SmallVectorImpl<Register> &VRegs,
                  MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI);

/// Version which handles irregular splits.
bool extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy,
                  SmallVectorImpl<Register> &VRegs,
                  SmallVectorImpl<Register> &LeftoverVRegs,
                  MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI);

/// Version which handles irregular sub-vector splits.
void extractVectorParts(Register Reg, unsigned NumElts,
                        SmallVectorImpl<Register> &VRegs,
                        MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI);

/// Templated variant of getOpcodeDef returning a MachineInstr derived T.
/// See if Reg is defined by a single def instruction of type T. Also try to do
/// trivial folding if it's a COPY with same types. Returns null otherwise.
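///
/// For example (illustrative; GBuildVector comes from GenericMachineInstrs.h
/// and Reg is a hypothetical vector register):
/// \code
///   if (auto *Build = getOpcodeDef<GBuildVector>(Reg, MRI)) {
///     // Reg is (possibly through copies) defined by a G_BUILD_VECTOR.
///   }
/// \endcode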
template <class T>
T *getOpcodeDef(Register Reg, const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return dyn_cast_or_null<T>(DefMI);
}

/// Returns an APFloat from Val converted to the appropriate size.
APFloat getAPFloatFromSize(double Val, unsigned Size);

/// Modify analysis usage so it preserves passes required for the SelectionDAG
/// fallback.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);

std::optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                       const Register Op2,
                                       const MachineRegisterInfo &MRI);
std::optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

/// Tries to constant fold a vector binop with sources \p Op1 and \p Op2.
/// Returns an empty vector on failure.
SmallVector<APInt> ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

std::optional<APInt> ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
                                        const Register Op0,
                                        const MachineRegisterInfo &MRI);

std::optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                       uint64_t Imm,
                                       const MachineRegisterInfo &MRI);

std::optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                              Register Src,
                                              const MachineRegisterInfo &MRI);

/// Tries to constant fold a G_CTLZ operation on \p Src. If \p Src is a vector
/// then it tries to do an element-wise constant fold.
std::optional<SmallVector<unsigned>>
ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI);

/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
                            GISelKnownBits *KnownBits = nullptr);

/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
/// this returns if \p Val can be assumed to never be a signaling NaN.
bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                     bool SNaN = false);

/// Returns true if \p Val can be assumed to never be a signaling NaN.
inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
  return isKnownNeverNaN(Val, MRI, true);
}

Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);

/// Return a virtual register corresponding to the incoming argument register
/// \p PhysReg. This register is expected to have class \p RC, and optional
/// type \p RegTy. This assumes all references to the register will use the
/// same type.
///
/// If there is an existing live-in argument register, it will be returned.
/// This will also ensure there is a valid copy.
Register getFunctionLiveInPhysReg(MachineFunction &MF,
                                  const TargetInstrInfo &TII,
                                  MCRegister PhysReg,
                                  const TargetRegisterClass &RC,
                                  const DebugLoc &DL, LLT RegTy = LLT());

/// Return the least common multiple type of \p OrigTy and \p TargetTy, by
/// changing the number of vector elements or scalar bitwidth. The intent is
/// that a G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be
/// constructed from \p OrigTy elements, and unmerged into \p TargetTy.
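///
/// For example (an illustration of the intent, not a specification of every
/// edge case such as mixed element types or pointers):
/// \code
///   getLCMType(LLT::scalar(32), LLT::scalar(64));          // expect s64
///   getLCMType(LLT::fixed_vector(2, 32), LLT::scalar(64)); // expect <2 x s32>
/// \endcode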
LLVM_READNONE
LLT getLCMType(LLT OrigTy, LLT TargetTy);

/// Return the smallest type that covers both \p OrigTy and \p TargetTy and is
/// a multiple of \p TargetTy.
LLVM_READNONE
LLT getCoverTy(LLT OrigTy, LLT TargetTy);

/// Return a type where the total size is the greatest common divisor of \p
/// OrigTy and \p TargetTy. This will try to either change the number of vector
/// elements, or bitwidth of scalars. The intent is the result type can be used
/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
/// with intermediate casts) can re-form \p TargetTy.
///
/// If these are vectors with different element types, this will try to produce
/// a vector with a compatible total size, but the element type of \p OrigTy. If
/// this can't be satisfied, this will produce a scalar smaller than the
/// original vector elements.
///
/// In the worst case, this returns LLT::scalar(1).
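///
/// For example (an illustration of the intent, not a specification of every
/// edge case such as mixed element types or pointers):
/// \code
///   getGCDType(LLT::fixed_vector(4, 32),
///              LLT::fixed_vector(2, 32));             // expect <2 x s32>
///   getGCDType(LLT::scalar(64), LLT::scalar(32));     // expect s32
/// \endcode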
LLVM_READNONE
LLT getGCDType(LLT OrigTy, LLT TargetTy);

/// Represents a value which can be a Register or a constant.
///
/// This is useful in situations where an instruction may have an interesting
/// register operand or interesting constant operand. For a concrete example,
/// \see getVectorSplat.
class RegOrConstant {
  int64_t Cst;
  Register Reg;
  bool IsReg;

public:
  explicit RegOrConstant(Register Reg) : Reg(Reg), IsReg(true) {}
  explicit RegOrConstant(int64_t Cst) : Cst(Cst), IsReg(false) {}
  bool isReg() const { return IsReg; }
  bool isCst() const { return !IsReg; }
  Register getReg() const {
    assert(isReg() && "Expected a register!");
    return Reg;
  }
  int64_t getCst() const {
    assert(isCst() && "Expected a constant!");
    return Cst;
  }
};

/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
/// If \p MI is not a splat, returns std::nullopt.
std::optional<int> getSplatIndex(MachineInstr &MI);

/// \returns the scalar integral splat value of \p Reg if possible.
std::optional<APInt> getIConstantSplatVal(const Register Reg,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar integral splat value defined by \p MI if possible.
std::optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value of \p Reg if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
                                                const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value defined by \p MI if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
                                                const MachineRegisterInfo &MRI);

/// Returns a floating point scalar constant of a build vector splat if it
/// exists. When \p AllowUndef == true some elements can be undef but not all.
std::optional<FPValueAndVReg> getFConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef = true);

/// Return true if the specified register is defined by G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const Register Reg,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
bool isBuildVectorAllZeros(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI,
                           bool AllowUndef = false);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
bool isBuildVectorAllOnes(const MachineInstr &MI,
                          const MachineRegisterInfo &MRI,
                          bool AllowUndef = false);

/// Return true if the specified instruction is known to be a constant, or a
/// vector of constants.
///
/// If \p AllowFP is true, this will consider G_FCONSTANT in addition to
/// G_CONSTANT. If \p AllowOpaqueConstants is true, constant-like instructions
/// such as G_GLOBAL_VALUE will also be considered.
bool isConstantOrConstantVector(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowFP = true,
                                bool AllowOpaqueConstants = true);

/// Return true if the value is a constant 0 integer or a splatted vector of a
/// constant 0 integer (with no undefs if \p AllowUndefs is false). This will
/// handle G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC as truncation is not an issue
/// for null values.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI,
                       bool AllowUndefs = false);

/// Return true if the value is a constant -1 integer or a splatted vector of a
/// constant -1 integer (with no undefs if \p AllowUndefs is false).
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowUndefs = false);

/// \returns a value when \p MI is a vector splat. The splat can be either a
/// Register or a constant.
///
/// Examples:
///
/// \code
///   %reg = COPY $physreg
///   %reg_splat = G_BUILD_VECTOR %reg, %reg, ..., %reg
/// \endcode
///
/// If called on the G_BUILD_VECTOR above, this will return a RegOrConstant
/// containing %reg.
///
/// \code
///   %cst = G_CONSTANT iN 4
///   %constant_splat = G_BUILD_VECTOR %cst, %cst, ..., %cst
/// \endcode
///
/// In the above case, this will return a RegOrConstant containing 4.
std::optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a build vector of
/// constant integers. Treats undef values as constants.
bool isConstantOrConstantVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a splat vector of
/// constant integers.
/// \returns the scalar constant or std::nullopt.
std::optional<APInt>
isConstantOrConstantSplatVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Attempt to match a unary predicate against a scalar/splat constant or every
/// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source
/// value was undef.
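///
/// For example (illustrative), checking that \p Reg is a non-zero constant or
/// a build vector of non-zero constants:
/// \code
///   bool AllNonZero = matchUnaryPredicate(MRI, Reg, [](const Constant *C) {
///     auto *CI = dyn_cast_or_null<ConstantInt>(C);
///     return CI && !CI->isZero();
///   });
/// \endcode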
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg,
                         std::function<bool(const Constant *ConstVal)> Match,
                         bool AllowUndefs = false);

/// Returns true if given the TargetLowering's boolean contents information,
/// the value \p Val contains a true value.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                    bool IsFP);
/// \returns true if given the TargetLowering's boolean contents information,
/// the value \p Val contains a false value.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                     bool IsFP);

/// Returns an integer representing true, as defined by the
/// TargetBooleanContents.
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);

/// Returns true if the given block should be optimized for size.
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI,
                      BlockFrequencyInfo *BFI);

using SmallInstListTy = GISelWorkList<4>;
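
/// Erase \p MI and record the instructions that define its use operands on
/// \p DeadInstChain, so callers can check whether those defs have become
/// trivially dead and erase them too.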
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver,
                      SmallInstListTy &DeadInstChain);
void eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs, MachineRegisterInfo &MRI,
                 LostDebugLocObserver *LocObserver = nullptr);
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                LostDebugLocObserver *LocObserver = nullptr);

/// Assuming the instruction \p MI is going to be deleted, attempt to salvage
/// debug users of \p MI by writing the effect of \p MI in a DIExpression.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI);

} // End namespace llvm.
#endif