1 //==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file This file declares the API of helper functions used throughout the
10 /// GlobalISel pipeline.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
15 #define LLVM_CODEGEN_GLOBALISEL_UTILS_H
16
17 #include "llvm/ADT/StringRef.h"
18 #include "llvm/CodeGen/MachineBasicBlock.h"
19 #include "llvm/CodeGen/Register.h"
20 #include "llvm/Support/Alignment.h"
21 #include "llvm/Support/LowLevelTypeImpl.h"
22 #include <cstdint>
23
24 namespace llvm {
25
26 class AnalysisUsage;
27 class BlockFrequencyInfo;
28 class GISelKnownBits;
29 class MachineFunction;
30 class MachineInstr;
31 class MachineOperand;
32 class MachineOptimizationRemarkEmitter;
33 class MachineOptimizationRemarkMissed;
34 struct MachinePointerInfo;
35 class MachineRegisterInfo;
36 class MCInstrDesc;
37 class ProfileSummaryInfo;
38 class RegisterBankInfo;
39 class TargetInstrInfo;
40 class TargetLowering;
41 class TargetPassConfig;
42 class TargetRegisterInfo;
43 class TargetRegisterClass;
44 class ConstantInt;
45 class ConstantFP;
46 class APFloat;
47
48 // Convenience macros for dealing with vector reduction opcodes.
49 #define GISEL_VECREDUCE_CASES_ALL \
50 case TargetOpcode::G_VECREDUCE_SEQ_FADD: \
51 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: \
52 case TargetOpcode::G_VECREDUCE_FADD: \
53 case TargetOpcode::G_VECREDUCE_FMUL: \
54 case TargetOpcode::G_VECREDUCE_FMAX: \
55 case TargetOpcode::G_VECREDUCE_FMIN: \
56 case TargetOpcode::G_VECREDUCE_ADD: \
57 case TargetOpcode::G_VECREDUCE_MUL: \
58 case TargetOpcode::G_VECREDUCE_AND: \
59 case TargetOpcode::G_VECREDUCE_OR: \
60 case TargetOpcode::G_VECREDUCE_XOR: \
61 case TargetOpcode::G_VECREDUCE_SMAX: \
62 case TargetOpcode::G_VECREDUCE_SMIN: \
63 case TargetOpcode::G_VECREDUCE_UMAX: \
64 case TargetOpcode::G_VECREDUCE_UMIN:
65
66 #define GISEL_VECREDUCE_CASES_NONSEQ \
67 case TargetOpcode::G_VECREDUCE_FADD: \
68 case TargetOpcode::G_VECREDUCE_FMUL: \
69 case TargetOpcode::G_VECREDUCE_FMAX: \
70 case TargetOpcode::G_VECREDUCE_FMIN: \
71 case TargetOpcode::G_VECREDUCE_ADD: \
72 case TargetOpcode::G_VECREDUCE_MUL: \
73 case TargetOpcode::G_VECREDUCE_AND: \
74 case TargetOpcode::G_VECREDUCE_OR: \
75 case TargetOpcode::G_VECREDUCE_XOR: \
76 case TargetOpcode::G_VECREDUCE_SMAX: \
77 case TargetOpcode::G_VECREDUCE_SMIN: \
78 case TargetOpcode::G_VECREDUCE_UMAX: \
79 case TargetOpcode::G_VECREDUCE_UMIN:
80
81 /// Try to constrain Reg to the specified register class. If this fails,
82 /// create a new virtual register in the correct class.
83 ///
84 /// \return The virtual register constrained to the right register class.
85 Register constrainRegToClass(MachineRegisterInfo &MRI,
86 const TargetInstrInfo &TII,
87 const RegisterBankInfo &RBI, Register Reg,
88 const TargetRegisterClass &RegClass);
89
90 /// Constrain the Register operand OpIdx, so that it is now constrained to the
91 /// TargetRegisterClass passed as an argument (RegClass).
92 /// If this fails, create a new virtual register in the correct class and insert
93 /// a COPY before \p InsertPt if it is a use or after if it is a definition.
/// In both cases, the function also updates the register of \p RegMO. The debug
/// location of \p InsertPt is used for the new copy.
96 ///
97 /// \return The virtual register constrained to the right register class.
98 Register constrainOperandRegClass(const MachineFunction &MF,
99 const TargetRegisterInfo &TRI,
100 MachineRegisterInfo &MRI,
101 const TargetInstrInfo &TII,
102 const RegisterBankInfo &RBI,
103 MachineInstr &InsertPt,
104 const TargetRegisterClass &RegClass,
105 MachineOperand &RegMO);
106
107 /// Try to constrain Reg so that it is usable by argument OpIdx of the provided
108 /// MCInstrDesc \p II. If this fails, create a new virtual register in the
109 /// correct class and insert a COPY before \p InsertPt if it is a use or after
110 /// if it is a definition. In both cases, the function also updates the register
/// of \p RegMO.
112 /// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
113 /// with RegClass obtained from the MCInstrDesc. The debug location of \p
114 /// InsertPt is used for the new copy.
115 ///
116 /// \return The virtual register constrained to the right register class.
117 Register constrainOperandRegClass(const MachineFunction &MF,
118 const TargetRegisterInfo &TRI,
119 MachineRegisterInfo &MRI,
120 const TargetInstrInfo &TII,
121 const RegisterBankInfo &RBI,
122 MachineInstr &InsertPt, const MCInstrDesc &II,
123 MachineOperand &RegMO, unsigned OpIdx);
124
125 /// Mutate the newly-selected instruction \p I to constrain its (possibly
126 /// generic) virtual register operands to the instruction's register class.
127 /// This could involve inserting COPYs before (for uses) or after (for defs).
128 /// This requires the number of operands to match the instruction description.
129 /// \returns whether operand regclass constraining succeeded.
130 ///
131 // FIXME: Not all instructions have the same number of operands. We should
132 // probably expose a constrain helper per operand and let the target selector
133 // constrain individual registers, like fast-isel.
134 bool constrainSelectedInstRegOperands(MachineInstr &I,
135 const TargetInstrInfo &TII,
136 const TargetRegisterInfo &TRI,
137 const RegisterBankInfo &RBI);
138
139 /// Check if DstReg can be replaced with SrcReg depending on the register
140 /// constraints.
141 bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);
142
143 /// Check whether an instruction \p MI is dead: it only defines dead virtual
144 /// registers, and doesn't have other side effects.
145 bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);
146
147 /// Report an ISel error as a missed optimization remark to the LLVMContext's
148 /// diagnostic stream. Set the FailedISel MachineFunction property.
149 void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
150 MachineOptimizationRemarkEmitter &MORE,
151 MachineOptimizationRemarkMissed &R);
152
153 void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
154 MachineOptimizationRemarkEmitter &MORE,
155 const char *PassName, StringRef Msg,
156 const MachineInstr &MI);
157
158 /// Report an ISel warning as a missed optimization remark to the LLVMContext's
159 /// diagnostic stream.
160 void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
161 MachineOptimizationRemarkEmitter &MORE,
162 MachineOptimizationRemarkMissed &R);
163
164 /// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
165 Optional<APInt> getConstantVRegVal(Register VReg,
166 const MachineRegisterInfo &MRI);
167
/// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
/// returns that value.
170 Optional<int64_t> getConstantVRegSExtVal(Register VReg,
171 const MachineRegisterInfo &MRI);
172
173 /// Simple struct used to hold a constant integer value and a virtual
174 /// register.
175 struct ValueAndVReg {
176 APInt Value;
177 Register VReg;
178 };
179 /// If \p VReg is defined by a statically evaluable chain of
180 /// instructions rooted on a G_F/CONSTANT (\p LookThroughInstrs == true)
181 /// and that constant fits in int64_t, returns its value as well as the
182 /// virtual register defined by this G_F/CONSTANT.
183 /// When \p LookThroughInstrs == false this function behaves like
184 /// getConstantVRegVal.
185 /// When \p HandleFConstants == false the function bails on G_FCONSTANTs.
186 /// When \p LookThroughAnyExt == true the function treats G_ANYEXT same as
187 /// G_SEXT.
188 Optional<ValueAndVReg>
189 getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
190 bool LookThroughInstrs = true,
191 bool HandleFConstants = true,
192 bool LookThroughAnyExt = false);
193 const ConstantInt *getConstantIntVRegVal(Register VReg,
194 const MachineRegisterInfo &MRI);
195 const ConstantFP* getConstantFPVRegVal(Register VReg,
196 const MachineRegisterInfo &MRI);
197
/// See if \p Reg is defined by a single def instruction whose opcode is
/// \p Opcode. Also try to do trivial folding if it's a COPY with
/// same types. Returns null otherwise.
201 MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
202 const MachineRegisterInfo &MRI);
203
204 /// Simple struct used to hold a Register value and the instruction which
205 /// defines it.
206 struct DefinitionAndSourceRegister {
207 MachineInstr *MI;
208 Register Reg;
209 };
210
211 /// Find the def instruction for \p Reg, and underlying value Register folding
212 /// away any copies.
213 ///
214 /// Also walks through hints such as G_ASSERT_ZEXT.
215 Optional<DefinitionAndSourceRegister>
216 getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
217
218 /// Find the def instruction for \p Reg, folding away any trivial copies. May
219 /// return nullptr if \p Reg is not a generic virtual register.
220 ///
221 /// Also walks through hints such as G_ASSERT_ZEXT.
222 MachineInstr *getDefIgnoringCopies(Register Reg,
223 const MachineRegisterInfo &MRI);
224
225 /// Find the source register for \p Reg, folding away any trivial copies. It
226 /// will be an output register of the instruction that getDefIgnoringCopies
227 /// returns. May return an invalid register if \p Reg is not a generic virtual
228 /// register.
229 ///
230 /// Also walks through hints such as G_ASSERT_ZEXT.
231 Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
232
233 // Templated variant of getOpcodeDef returning a MachineInstr derived T.
234 /// See if Reg is defined by an single def instruction of type T
235 /// Also try to do trivial folding if it's a COPY with
236 /// same types. Returns null otherwise.
237 template <class T>
getOpcodeDef(Register Reg,const MachineRegisterInfo & MRI)238 T *getOpcodeDef(Register Reg, const MachineRegisterInfo &MRI) {
239 MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
240 return dyn_cast_or_null<T>(DefMI);
241 }
242
243 /// Returns an APFloat from Val converted to the appropriate size.
244 APFloat getAPFloatFromSize(double Val, unsigned Size);
245
246 /// Modify analysis usage so it preserves passes required for the SelectionDAG
247 /// fallback.
248 void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);
249
250 Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
251 const Register Op2,
252 const MachineRegisterInfo &MRI);
253 Optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
254 const Register Op2,
255 const MachineRegisterInfo &MRI);
256
257 Optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
258 uint64_t Imm, const MachineRegisterInfo &MRI);
259
260 Optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
261 Register Src,
262 const MachineRegisterInfo &MRI);
263
264 /// Test if the given value is known to have exactly one bit set. This differs
265 /// from computeKnownBits in that it doesn't necessarily determine which bit is
266 /// set.
267 bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
268 GISelKnownBits *KnownBits = nullptr);
269
270 /// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
271 /// this returns if \p Val can be assumed to never be a signaling NaN.
272 bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
273 bool SNaN = false);
274
275 /// Returns true if \p Val can be assumed to never be a signaling NaN.
isKnownNeverSNaN(Register Val,const MachineRegisterInfo & MRI)276 inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
277 return isKnownNeverNaN(Val, MRI, true);
278 }
279
280 Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);
281
282 /// Return a virtual register corresponding to the incoming argument register \p
283 /// PhysReg. This register is expected to have class \p RC, and optional type \p
284 /// RegTy. This assumes all references to the register will use the same type.
285 ///
286 /// If there is an existing live-in argument register, it will be returned.
287 /// This will also ensure there is a valid copy
288 Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII,
289 MCRegister PhysReg,
290 const TargetRegisterClass &RC,
291 LLT RegTy = LLT());
292
293 /// Return the least common multiple type of \p OrigTy and \p TargetTy, by changing the
294 /// number of vector elements or scalar bitwidth. The intent is a
295 /// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
296 /// \p OrigTy elements, and unmerged into \p TargetTy
297 LLVM_READNONE
298 LLT getLCMType(LLT OrigTy, LLT TargetTy);
299
300 /// Return a type where the total size is the greatest common divisor of \p
301 /// OrigTy and \p TargetTy. This will try to either change the number of vector
302 /// elements, or bitwidth of scalars. The intent is the result type can be used
303 /// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
304 /// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
305 /// with intermediate casts) can re-form \p TargetTy.
306 ///
307 /// If these are vectors with different element types, this will try to produce
308 /// a vector with a compatible total size, but the element type of \p OrigTy. If
309 /// this can't be satisfied, this will produce a scalar smaller than the
310 /// original vector elements.
311 ///
312 /// In the worst case, this returns LLT::scalar(1)
313 LLVM_READNONE
314 LLT getGCDType(LLT OrigTy, LLT TargetTy);
315
316 /// Represents a value which can be a Register or a constant.
317 ///
318 /// This is useful in situations where an instruction may have an interesting
319 /// register operand or interesting constant operand. For a concrete example,
320 /// \see getVectorSplat.
321 class RegOrConstant {
322 int64_t Cst;
323 Register Reg;
324 bool IsReg;
325
326 public:
RegOrConstant(Register Reg)327 explicit RegOrConstant(Register Reg) : Reg(Reg), IsReg(true) {}
RegOrConstant(int64_t Cst)328 explicit RegOrConstant(int64_t Cst) : Cst(Cst), IsReg(false) {}
isReg()329 bool isReg() const { return IsReg; }
isCst()330 bool isCst() const { return !IsReg; }
getReg()331 Register getReg() const {
332 assert(isReg() && "Expected a register!");
333 return Reg;
334 }
getCst()335 int64_t getCst() const {
336 assert(isCst() && "Expected a constant!");
337 return Cst;
338 }
339 };
340
341 /// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
342 /// If \p MI is not a splat, returns None.
343 Optional<int> getSplatIndex(MachineInstr &MI);
344
345 /// Returns a scalar constant of a G_BUILD_VECTOR splat if it exists.
346 Optional<int64_t> getBuildVectorConstantSplat(const MachineInstr &MI,
347 const MachineRegisterInfo &MRI);
348
349 /// Return true if the specified instruction is a G_BUILD_VECTOR or
350 /// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
351 bool isBuildVectorAllZeros(const MachineInstr &MI,
352 const MachineRegisterInfo &MRI);
353
354 /// Return true if the specified instruction is a G_BUILD_VECTOR or
355 /// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
356 bool isBuildVectorAllOnes(const MachineInstr &MI,
357 const MachineRegisterInfo &MRI);
358
359 /// \returns a value when \p MI is a vector splat. The splat can be either a
360 /// Register or a constant.
361 ///
362 /// Examples:
363 ///
364 /// \code
365 /// %reg = COPY $physreg
366 /// %reg_splat = G_BUILD_VECTOR %reg, %reg, ..., %reg
367 /// \endcode
368 ///
369 /// If called on the G_BUILD_VECTOR above, this will return a RegOrConstant
370 /// containing %reg.
371 ///
372 /// \code
373 /// %cst = G_CONSTANT iN 4
374 /// %constant_splat = G_BUILD_VECTOR %cst, %cst, ..., %cst
375 /// \endcode
376 ///
377 /// In the above case, this will return a RegOrConstant containing 4.
378 Optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
379 const MachineRegisterInfo &MRI);
380
381 /// Attempt to match a unary predicate against a scalar/splat constant or every
382 /// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source
383 /// value was undef.
384 bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg,
385 std::function<bool(const Constant *ConstVal)> Match,
386 bool AllowUndefs = false);
387
388 /// Returns true if given the TargetLowering's boolean contents information,
389 /// the value \p Val contains a true value.
390 bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
391 bool IsFP);
392
393 /// Returns an integer representing true, as defined by the
394 /// TargetBooleanContents.
395 int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);
396
397 /// Returns true if the given block should be optimized for size.
398 bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI,
399 BlockFrequencyInfo *BFI);
400
401 } // End namespace llvm.
402 #endif
403