//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API of helper functions used throughout the
/// GlobalISel pipeline.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
#define LLVM_CODEGEN_GLOBALISEL_UTILS_H

#include "GISelWorkList.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include <cstdint>

namespace llvm {

class AnalysisUsage;
class LostDebugLocObserver;
class MachineBasicBlock;
class BlockFrequencyInfo;
class GISelKnownBits;
class MachineFunction;
class MachineInstr;
class MachineOperand;
class MachineOptimizationRemarkEmitter;
class MachineOptimizationRemarkMissed;
struct MachinePointerInfo;
class MachineRegisterInfo;
class MCInstrDesc;
class ProfileSummaryInfo;
class RegisterBankInfo;
class TargetInstrInfo;
class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetRegisterClass;
class ConstantFP;
class APFloat;

// Convenience macros for dealing with vector reduction opcodes.
#define GISEL_VECREDUCE_CASES_ALL \
  case TargetOpcode::G_VECREDUCE_SEQ_FADD: \
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL: \
  case TargetOpcode::G_VECREDUCE_FADD: \
  case TargetOpcode::G_VECREDUCE_FMUL: \
  case TargetOpcode::G_VECREDUCE_FMAX: \
  case TargetOpcode::G_VECREDUCE_FMIN: \
  case TargetOpcode::G_VECREDUCE_ADD: \
  case TargetOpcode::G_VECREDUCE_MUL: \
  case TargetOpcode::G_VECREDUCE_AND: \
  case TargetOpcode::G_VECREDUCE_OR: \
  case TargetOpcode::G_VECREDUCE_XOR: \
  case TargetOpcode::G_VECREDUCE_SMAX: \
  case TargetOpcode::G_VECREDUCE_SMIN: \
  case TargetOpcode::G_VECREDUCE_UMAX: \
  case TargetOpcode::G_VECREDUCE_UMIN:

#define GISEL_VECREDUCE_CASES_NONSEQ \
  case TargetOpcode::G_VECREDUCE_FADD: \
  case TargetOpcode::G_VECREDUCE_FMUL: \
  case TargetOpcode::G_VECREDUCE_FMAX: \
  case TargetOpcode::G_VECREDUCE_FMIN: \
  case TargetOpcode::G_VECREDUCE_ADD: \
  case TargetOpcode::G_VECREDUCE_MUL: \
  case TargetOpcode::G_VECREDUCE_AND: \
  case TargetOpcode::G_VECREDUCE_OR: \
  case TargetOpcode::G_VECREDUCE_XOR: \
  case TargetOpcode::G_VECREDUCE_SMAX: \
  case TargetOpcode::G_VECREDUCE_SMIN: \
  case TargetOpcode::G_VECREDUCE_UMAX: \
  case TargetOpcode::G_VECREDUCE_UMIN:
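// Example (illustrative sketch): these macros expand to a run of `case`
// labels, so they are meant to be used directly inside a switch over a
// generic opcode, e.g.:
//
//   switch (MI.getOpcode()) {
//   GISEL_VECREDUCE_CASES_NONSEQ
//     // Handle the non-sequential vector reductions here.
//     break;
//   default:
//     break;
//   }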

/// Try to constrain Reg to the specified register class. If this fails,
/// create a new virtual register in the correct class.
///
/// \return The virtual register constrained to the right register class.
Register constrainRegToClass(MachineRegisterInfo &MRI,
                             const TargetInstrInfo &TII,
                             const RegisterBankInfo &RBI, Register Reg,
                             const TargetRegisterClass &RegClass);

/// Constrain the Register operand \p RegMO, so that it is now constrained to
/// the TargetRegisterClass passed as an argument (RegClass).
/// If this fails, create a new virtual register in the correct class and
/// insert a COPY before \p InsertPt if it is a use or after if it is a
/// definition. In both cases, the function also updates the register of
/// \p RegMO. The debug location of \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt,
                                  const TargetRegisterClass &RegClass,
                                  MachineOperand &RegMO);

/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
/// MCInstrDesc \p II. If this fails, create a new virtual register in the
/// correct class and insert a COPY before \p InsertPt if it is a use or after
/// if it is a definition. In both cases, the function also updates the
/// register of \p RegMO.
/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
/// with RegClass obtained from the MCInstrDesc. The debug location of
/// \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt, const MCInstrDesc &II,
                                  MachineOperand &RegMO, unsigned OpIdx);
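
// Example (illustrative sketch): constraining operand 0 of a hypothetical,
// newly built target instruction `I` to the class required by its
// MCInstrDesc. `I` is an assumption of this sketch, not part of the API.
//
//   constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
//                            I.getOperand(0), 0);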

/// Mutate the newly-selected instruction \p I to constrain its (possibly
/// generic) virtual register operands to the instruction's register class.
/// This could involve inserting COPYs before (for uses) or after (for defs).
/// This requires the number of operands to match the instruction description.
/// \returns whether operand regclass constraining succeeded.
///
// FIXME: Not all instructions have the same number of operands. We should
// probably expose a constrain helper per operand and let the target selector
// constrain individual registers, like fast-isel.
bool constrainSelectedInstRegOperands(MachineInstr &I,
                                      const TargetInstrInfo &TII,
                                      const TargetRegisterInfo &TRI,
                                      const RegisterBankInfo &RBI);
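
// Example (illustrative sketch): a target's InstructionSelector typically
// calls this right after building a target instruction. `I` below is a
// hypothetical, newly built MachineInstr, not part of this API.
//
//   if (!constrainSelectedInstRegOperands(I, TII, TRI, RBI))
//     return false; // Could not constrain every register operand.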

/// Check if DstReg can be replaced with SrcReg depending on the register
/// constraints.
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);

/// Check whether an instruction \p MI is dead: it only defines dead virtual
/// registers, and doesn't have other side effects.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);

/// Report an ISel error as a missed optimization remark to the LLVMContext's
/// diagnostic stream. Set the FailedISel MachineFunction property.
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        const char *PassName, StringRef Msg,
                        const MachineInstr &MI);

/// Report an ISel warning as a missed optimization remark to the LLVMContext's
/// diagnostic stream.
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
std::optional<APInt> getIConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI);

/// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
/// return that value.
std::optional<int64_t> getIConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI);
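
// Example (illustrative sketch): querying a compile-time-known shift amount.
// `ShiftAmtReg` is a hypothetical register holding the shift amount operand.
//
//   std::optional<int64_t> ShAmt = getIConstantVRegSExtVal(ShiftAmtReg, MRI);
//   bool IsNoOpShift = ShAmt && *ShAmt == 0;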

/// Simple struct used to hold a constant integer value and a virtual
/// register.
struct ValueAndVReg {
  APInt Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_CONSTANT, returns its APInt value and def register.
std::optional<ValueAndVReg>
getIConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);
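
// Example (illustrative MIR sketch; the register names are hypothetical):
//
//   %c:_(s64) = G_CONSTANT i64 42
//   %t:_(s32) = G_TRUNC %c(s64)
//   %x:_(s32) = COPY %t(s32)
//
// With LookThroughInstrs enabled, querying the register holding %x walks
// through the COPY and the G_TRUNC, returning the (truncated) constant 42
// together with the def register of the G_CONSTANT.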

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_CONSTANT or G_FCONSTANT, returns its value as an APInt and
/// def register.
std::optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI,
    bool LookThroughInstrs = true, bool LookThroughAnyExt = false);

struct FPValueAndVReg {
  APFloat Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_FCONSTANT, returns its APFloat value and def register.
std::optional<FPValueAndVReg>
getFConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

const ConstantFP *getConstantFPVRegVal(Register VReg,
                                       const MachineRegisterInfo &MRI);

/// See if Reg is defined by a single def instruction that is
/// Opcode. Also try to do trivial folding if it's a COPY with
/// same types. Returns null otherwise.
MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                           const MachineRegisterInfo &MRI);

/// Simple struct used to hold a Register value and the instruction which
/// defines it.
struct DefinitionAndSourceRegister {
  MachineInstr *MI;
  Register Reg;
};

/// Find the def instruction for \p Reg, and the underlying source Register,
/// folding away any copies.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
std::optional<DefinitionAndSourceRegister>
getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Find the def instruction for \p Reg, folding away any trivial copies. May
/// return nullptr if \p Reg is not a generic virtual register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
MachineInstr *getDefIgnoringCopies(Register Reg,
                                   const MachineRegisterInfo &MRI);

/// Find the source register for \p Reg, folding away any trivial copies. It
/// will be an output register of the instruction that getDefIgnoringCopies
/// returns. May return an invalid register if \p Reg is not a generic virtual
/// register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
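
// Example (illustrative MIR sketch; the register names are hypothetical):
//
//   %a:_(s32) = G_ADD %x, %y
//   %b:_(s32) = COPY %a(s32)
//
// Querying the register holding %b, getDefIgnoringCopies returns the G_ADD
// instruction and getSrcRegIgnoringCopies returns %a.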

// Templated variant of getOpcodeDef returning a MachineInstr derived T.
/// See if Reg is defined by a single def instruction of type T.
/// Also try to do trivial folding if it's a COPY with
/// same types. Returns null otherwise.
template <class T>
T *getOpcodeDef(Register Reg, const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return dyn_cast_or_null<T>(DefMI);
}
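
// Example (illustrative sketch): the template parameter is a MachineInstr
// subclass with a suitable classof, such as the generic instruction wrappers
// in GenericMachineInstrs.h. `SrcReg` is a hypothetical register.
//
//   GAnyLoad *Load = getOpcodeDef<GAnyLoad>(SrcReg, MRI);
//   bool DefinedByLoad = Load != nullptr;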

/// Returns an APFloat from Val converted to the appropriate size.
APFloat getAPFloatFromSize(double Val, unsigned Size);

/// Modify analysis usage so it preserves passes required for the SelectionDAG
/// fallback.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);

std::optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                       const Register Op2,
                                       const MachineRegisterInfo &MRI);
std::optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);
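
// Example (illustrative sketch): if both hypothetical registers LHSReg and
// RHSReg are defined by G_CONSTANTs, the fold below yields their sum as an
// APInt; otherwise it yields std::nullopt.
//
//   std::optional<APInt> Sum =
//       ConstantFoldBinOp(TargetOpcode::G_ADD, LHSReg, RHSReg, MRI);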

/// Tries to constant fold a vector binop with sources \p Op1 and \p Op2.
/// Returns an empty vector on failure.
SmallVector<APInt> ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

std::optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                       uint64_t Imm,
                                       const MachineRegisterInfo &MRI);

std::optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                              Register Src,
                                              const MachineRegisterInfo &MRI);

/// Tries to constant fold a G_CTLZ operation on \p Src. If \p Src is a vector
/// then it tries to do an element-wise constant fold.
std::optional<SmallVector<unsigned>>
ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI);

/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
                            GISelKnownBits *KnownBits = nullptr);
/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is
/// true, this returns whether \p Val can be assumed to never be a signaling
/// NaN.
bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                     bool SNaN = false);

/// Returns true if \p Val can be assumed to never be a signaling NaN.
inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
  return isKnownNeverNaN(Val, MRI, true);
}

Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);

/// Return a virtual register corresponding to the incoming argument register
/// \p PhysReg. This register is expected to have class \p RC, and optional
/// type \p RegTy. This assumes all references to the register will use the
/// same type.
///
/// If there is an existing live-in argument register, it will be returned.
/// This will also ensure there is a valid copy from \p PhysReg into a virtual
/// register in the entry block.
Register getFunctionLiveInPhysReg(MachineFunction &MF,
                                  const TargetInstrInfo &TII,
                                  MCRegister PhysReg,
                                  const TargetRegisterClass &RC,
                                  const DebugLoc &DL, LLT RegTy = LLT());
/// Return the least common multiple type of \p OrigTy and \p TargetTy, by
/// changing the number of vector elements or scalar bitwidth. The intent is a
/// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
/// \p OrigTy elements, and unmerged into \p TargetTy.
LLVM_READNONE
LLT getLCMType(LLT OrigTy, LLT TargetTy);

/// Return the smallest type that covers both \p OrigTy and \p TargetTy and is
/// a multiple of \p TargetTy.
LLVM_READNONE
LLT getCoverTy(LLT OrigTy, LLT TargetTy);

/// Return a type where the total size is the greatest common divisor of \p
/// OrigTy and \p TargetTy. This will try to either change the number of vector
/// elements, or bitwidth of scalars. The intent is the result type can be used
/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS
/// (possibly with intermediate casts) can re-form \p TargetTy.
///
/// If these are vectors with different element types, this will try to produce
/// a vector with a compatible total size, but the element type of \p OrigTy.
/// If this can't be satisfied, this will produce a scalar smaller than the
/// original vector elements.
///
/// In the worst case, this returns LLT::scalar(1).
LLVM_READNONE
LLT getGCDType(LLT OrigTy, LLT TargetTy);
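
// Example (illustrative sketch; scalar cases only, since vector results
// depend on the element types involved):
//
//   LLT S32 = LLT::scalar(32);
//   LLT S64 = LLT::scalar(64);
//   LLT Wide = getLCMType(S32, S64);   // s64: one s64 covers two s32 pieces.
//   LLT Narrow = getGCDType(S64, S32); // s32: common piece of both types.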

/// Represents a value which can be a Register or a constant.
///
/// This is useful in situations where an instruction may have an interesting
/// register operand or interesting constant operand. For a concrete example,
/// \see getVectorSplat.
class RegOrConstant {
  int64_t Cst;
  Register Reg;
  bool IsReg;

public:
  explicit RegOrConstant(Register Reg) : Reg(Reg), IsReg(true) {}
  explicit RegOrConstant(int64_t Cst) : Cst(Cst), IsReg(false) {}
  bool isReg() const { return IsReg; }
  bool isCst() const { return !IsReg; }
  Register getReg() const {
    assert(isReg() && "Expected a register!");
    return Reg;
  }
  int64_t getCst() const {
    assert(isCst() && "Expected a constant!");
    return Cst;
  }
};

/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
/// If \p MI is not a splat, returns std::nullopt.
std::optional<int> getSplatIndex(MachineInstr &MI);

/// \returns the scalar integral splat value of \p Reg if possible.
std::optional<APInt> getIConstantSplatVal(const Register Reg,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar integral splat value defined by \p MI if possible.
std::optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value of \p Reg if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
                                                const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value defined by \p MI if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
                                                const MachineRegisterInfo &MRI);
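
// Example (illustrative MIR sketch; the register names are hypothetical):
//
//   %c:_(s32) = G_CONSTANT i32 7
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %c(s32), %c(s32), %c(s32), %c(s32)
//
// Querying the register holding %v, getIConstantSplatVal returns the APInt 7;
// for a non-splat G_BUILD_VECTOR it returns std::nullopt.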

/// Returns a floating point scalar constant of a build vector splat if it
/// exists. When \p AllowUndef == true some elements can be undef but not all.
std::optional<FPValueAndVReg> getFConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef = true);

/// Return true if the specified register is defined by G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const Register Reg,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
bool isBuildVectorAllZeros(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI,
                           bool AllowUndef = false);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
bool isBuildVectorAllOnes(const MachineInstr &MI,
                          const MachineRegisterInfo &MRI,
                          bool AllowUndef = false);

/// Return true if the specified instruction is known to be a constant, or a
/// vector of constants.
///
/// If \p AllowFP is true, this will consider G_FCONSTANT in addition to
/// G_CONSTANT. If \p AllowOpaqueConstants is true, constant-like instructions
/// such as G_GLOBAL_VALUE will also be considered.
bool isConstantOrConstantVector(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowFP = true,
                                bool AllowOpaqueConstants = true);

/// Return true if the value is a constant 0 integer or a splatted vector of a
/// constant 0 integer (with no undefs if \p AllowUndefs is false). This will
/// handle G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC as truncation is not an
/// issue for null values.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI,
                       bool AllowUndefs = false);

/// Return true if the value is a constant -1 integer or a splatted vector of a
/// constant -1 integer (with no undefs if \p AllowUndefs is false).
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowUndefs = false);

/// \returns a value when \p MI is a vector splat. The splat can be either a
/// Register or a constant.
///
/// Examples:
///
/// \code
/// %reg = COPY $physreg
/// %reg_splat = G_BUILD_VECTOR %reg, %reg, ..., %reg
/// \endcode
///
/// If called on the G_BUILD_VECTOR above, this will return a RegOrConstant
/// containing %reg.
///
/// \code
/// %cst = G_CONSTANT iN 4
/// %constant_splat = G_BUILD_VECTOR %cst, %cst, ..., %cst
/// \endcode
///
/// In the above case, this will return a RegOrConstant containing 4.
std::optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a build vector of
/// constant integers. Treats undef values as constants.
bool isConstantOrConstantVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a splat vector of
/// constant integers.
/// \returns the scalar constant or std::nullopt.
std::optional<APInt>
isConstantOrConstantSplatVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Attempt to match a unary predicate against a scalar/splat constant or every
/// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source
/// value was undef.
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg,
                         std::function<bool(const Constant *ConstVal)> Match,
                         bool AllowUndefs = false);
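
// Example (illustrative sketch): check that a scalar constant or every
// G_BUILD_VECTOR element is a power-of-two constant. The lambda returns
// false for a null ConstVal (an undef element, passed only when AllowUndefs
// is true).
//
//   bool AllPow2 = matchUnaryPredicate(MRI, Reg, [](const Constant *C) {
//     auto *CI = dyn_cast_or_null<ConstantInt>(C);
//     return CI && CI->getValue().isPowerOf2();
//   });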

/// Returns true if, given the TargetLowering's boolean contents information,
/// the value \p Val contains a true value.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                    bool IsFP);
/// \returns true if, given the TargetLowering's boolean contents information,
/// the value \p Val contains a false value.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                     bool IsFP);

/// Returns an integer representing true, as defined by the
/// TargetBooleanContents.
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);

/// Returns true if the given block should be optimized for size.
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI,
                      BlockFrequencyInfo *BFI);

using SmallInstListTy = GISelWorkList<4>;
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver,
                      SmallInstListTy &DeadInstChain);
void eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs, MachineRegisterInfo &MRI,
                 LostDebugLocObserver *LocObserver = nullptr);
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                LostDebugLocObserver *LocObserver = nullptr);
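
// Illustrative usage note (a sketch): prefer eraseInstr over calling
// MI.eraseFromParent() directly during GlobalISel passes, so that source
// definitions of MI which become trivially dead are erased as well.
//
//   eraseInstr(MI, MRI, LocObserver);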

/// Assuming the instruction \p MI is going to be deleted, attempt to salvage
/// debug users of \p MI by writing the effect of \p MI in a DIExpression.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI);

} // End namespace llvm.
#endif