//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API of helper functions used throughout the
/// GlobalISel pipeline.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
#define LLVM_CODEGEN_GLOBALISEL_UTILS_H

#include "GISelWorkList.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include <cstdint>

namespace llvm {

class AnalysisUsage;
class LostDebugLocObserver;
class MachineBasicBlock;
class BlockFrequencyInfo;
class GISelKnownBits;
class MachineFunction;
class MachineInstr;
class MachineOperand;
class MachineOptimizationRemarkEmitter;
class MachineOptimizationRemarkMissed;
struct MachinePointerInfo;
class MachineRegisterInfo;
class MCInstrDesc;
class ProfileSummaryInfo;
class RegisterBankInfo;
class TargetInstrInfo;
class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetRegisterClass;
class ConstantFP;
class APFloat;

// Convenience macros for dealing with vector reduction opcodes.
#define GISEL_VECREDUCE_CASES_ALL                                              \
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:                                     \
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:                                     \
  case TargetOpcode::G_VECREDUCE_FADD:                                         \
  case TargetOpcode::G_VECREDUCE_FMUL:                                         \
  case TargetOpcode::G_VECREDUCE_FMAX:                                         \
  case TargetOpcode::G_VECREDUCE_FMIN:                                         \
  case TargetOpcode::G_VECREDUCE_ADD:                                          \
  case TargetOpcode::G_VECREDUCE_MUL:                                          \
  case TargetOpcode::G_VECREDUCE_AND:                                          \
  case TargetOpcode::G_VECREDUCE_OR:                                           \
  case TargetOpcode::G_VECREDUCE_XOR:                                          \
  case TargetOpcode::G_VECREDUCE_SMAX:                                         \
  case TargetOpcode::G_VECREDUCE_SMIN:                                         \
  case TargetOpcode::G_VECREDUCE_UMAX:                                         \
  case TargetOpcode::G_VECREDUCE_UMIN:

#define GISEL_VECREDUCE_CASES_NONSEQ                                           \
  case TargetOpcode::G_VECREDUCE_FADD:                                         \
  case TargetOpcode::G_VECREDUCE_FMUL:                                         \
  case TargetOpcode::G_VECREDUCE_FMAX:                                         \
  case TargetOpcode::G_VECREDUCE_FMIN:                                         \
  case TargetOpcode::G_VECREDUCE_ADD:                                          \
  case TargetOpcode::G_VECREDUCE_MUL:                                          \
  case TargetOpcode::G_VECREDUCE_AND:                                          \
  case TargetOpcode::G_VECREDUCE_OR:                                           \
  case TargetOpcode::G_VECREDUCE_XOR:                                          \
  case TargetOpcode::G_VECREDUCE_SMAX:                                         \
  case TargetOpcode::G_VECREDUCE_SMIN:                                         \
  case TargetOpcode::G_VECREDUCE_UMAX:                                         \
  case TargetOpcode::G_VECREDUCE_UMIN:

/// Try to constrain Reg to the specified register class. If this fails,
/// create a new virtual register in the correct class.
///
/// \return The virtual register constrained to the right register class.
Register constrainRegToClass(MachineRegisterInfo &MRI,
                             const TargetInstrInfo &TII,
                             const RegisterBankInfo &RBI, Register Reg,
                             const TargetRegisterClass &RegClass);

/// Constrain the register operand \p RegMO so that it is constrained to the
/// TargetRegisterClass passed as an argument (\p RegClass).
/// If this fails, create a new virtual register in the correct class and
/// insert a COPY before \p InsertPt if the operand is a use, or after if it is
/// a definition. In both cases, the function also updates the register of
/// \p RegMO. The debug location of \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt,
                                  const TargetRegisterClass &RegClass,
                                  MachineOperand &RegMO);

/// Try to constrain the register in \p RegMO so that it is usable as operand
/// \p OpIdx of the provided MCInstrDesc \p II. If this fails, create a new
/// virtual register in the correct class and insert a COPY before \p InsertPt
/// if the operand is a use, or after if it is a definition. In both cases, the
/// function also updates the register of \p RegMO.
/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
/// with RegClass obtained from the MCInstrDesc. The debug location of \p
/// InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt, const MCInstrDesc &II,
                                  MachineOperand &RegMO, unsigned OpIdx);

/// Mutate the newly-selected instruction \p I to constrain its (possibly
/// generic) virtual register operands to the instruction's register class.
/// This could involve inserting COPYs before (for uses) or after (for defs).
/// This requires the number of operands to match the instruction description.
/// \returns whether operand regclass constraining succeeded.
///
// FIXME: Not all instructions have the same number of operands. We should
// probably expose a constrain helper per operand and let the target selector
// constrain individual registers, like fast-isel.
bool constrainSelectedInstRegOperands(MachineInstr &I,
                                      const TargetInstrInfo &TII,
                                      const TargetRegisterInfo &TRI,
                                      const RegisterBankInfo &RBI);
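
// Illustrative sketch (not part of the upstream documentation; the target
// opcode and variable names are hypothetical): a target's InstructionSelector
// typically calls this right after rewriting a generic instruction to a
// target opcode, so that the remaining generic virtual registers receive
// concrete register classes.
//
//   I.setDesc(TII.get(AArch64::ADDWrr));
//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);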

/// Check if DstReg can be replaced with SrcReg depending on the register
/// constraints.
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);

/// Check whether an instruction \p MI is dead: it only defines dead virtual
/// registers, and doesn't have other side effects.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);

/// Report an ISel error as a missed optimization remark to the LLVMContext's
/// diagnostic stream.  Set the FailedISel MachineFunction property.
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        const char *PassName, StringRef Msg,
                        const MachineInstr &MI);

/// Report an ISel warning as a missed optimization remark to the LLVMContext's
/// diagnostic stream.
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
Optional<APInt> getIConstantVRegVal(Register VReg,
                                    const MachineRegisterInfo &MRI);

/// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
/// returns that value.
Optional<int64_t> getIConstantVRegSExtVal(Register VReg,
                                          const MachineRegisterInfo &MRI);

/// Simple struct used to hold a constant integer value and a virtual
/// register.
struct ValueAndVReg {
  APInt Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_CONSTANT, returns its APInt value and def register.
Optional<ValueAndVReg>
getIConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);
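
// Illustrative sketch (the register name is hypothetical): combines and
// selectors commonly use this to fold an operand that is a constant, possibly
// hidden behind truncations or extensions.
//
//   if (auto Cst = getIConstantVRegValWithLookThrough(RHSReg, MRI))
//     if (Cst->Value.isPowerOf2())
//       unsigned ShiftAmt = Cst->Value.logBase2();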

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_CONSTANT or G_FCONSTANT, returns its value as an APInt and
/// its def register.
Optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI,
    bool LookThroughInstrs = true, bool LookThroughAnyExt = false);

struct FPValueAndVReg {
  APFloat Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions
/// rooted on a G_FCONSTANT, returns its APFloat value and def register.
Optional<FPValueAndVReg>
getFConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

const ConstantFP* getConstantFPVRegVal(Register VReg,
                                       const MachineRegisterInfo &MRI);

/// See if \p Reg is defined by a single def instruction whose opcode is
/// \p Opcode. Also try to do trivial folding if it's a COPY with the same
/// types. Returns null otherwise.
MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                           const MachineRegisterInfo &MRI);

/// Simple struct used to hold a Register value and the instruction which
/// defines it.
struct DefinitionAndSourceRegister {
  MachineInstr *MI;
  Register Reg;
};

/// Find the def instruction for \p Reg and the underlying source register,
/// folding away any copies.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
Optional<DefinitionAndSourceRegister>
getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Find the def instruction for \p Reg, folding away any trivial copies. May
/// return nullptr if \p Reg is not a generic virtual register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
MachineInstr *getDefIgnoringCopies(Register Reg,
                                   const MachineRegisterInfo &MRI);

/// Find the source register for \p Reg, folding away any trivial copies. It
/// will be an output register of the instruction that getDefIgnoringCopies
/// returns. May return an invalid register if \p Reg is not a generic virtual
/// register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
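
// Illustrative sketch (the register names are hypothetical): these helpers let
// matching code peek through COPY (and hint) instructions without mutating
// anything.
//
//   MachineInstr *Def = getDefIgnoringCopies(UseReg, MRI);
//   if (Def && Def->getOpcode() == TargetOpcode::G_TRUNC)
//     Register TruncSrc = Def->getOperand(1).getReg();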

/// Templated variant of getOpcodeDef returning a MachineInstr-derived T.
/// See if \p Reg is defined by a single def instruction of type T. Also try
/// to do trivial folding if it's a COPY with the same types. Returns null
/// otherwise.
template <class T>
T *getOpcodeDef(Register Reg, const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return dyn_cast_or_null<T>(DefMI);
}
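
// Illustrative sketch: with the instruction wrapper classes from
// GenericMachineInstrs.h (assumed to be included by the caller), the templated
// form reads naturally.
//
//   if (auto *Load = getOpcodeDef<GLoad>(Reg, MRI))
//     MachineMemOperand &MMO = Load->getMMO();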

/// Returns an APFloat from Val converted to the appropriate size.
APFloat getAPFloatFromSize(double Val, unsigned Size);

/// Modify analysis usage so it preserves passes required for the SelectionDAG
/// fallback.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);

Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                  const Register Op2,
                                  const MachineRegisterInfo &MRI);
Optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                      const Register Op2,
                                      const MachineRegisterInfo &MRI);

/// Tries to constant fold a vector binop with sources \p Op1 and \p Op2.
/// Returns an empty vector on failure.
SmallVector<APInt> ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);
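
// Illustrative sketch (the registers and builder variable are hypothetical):
// callers typically attempt the fold and only rewrite when a value comes back.
//
//   if (Optional<APInt> Folded =
//           ConstantFoldBinOp(TargetOpcode::G_AND, LHS, RHS, MRI))
//     Builder.buildConstant(DstReg, *Folded);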

Optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                  uint64_t Imm, const MachineRegisterInfo &MRI);

Optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                         Register Src,
                                         const MachineRegisterInfo &MRI);

/// Tries to constant fold a G_CTLZ operation on \p Src. If \p Src is a vector
/// then it tries to do an element-wise constant fold.
Optional<SmallVector<unsigned>>
ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI);

/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
                            GISelKnownBits *KnownBits = nullptr);

/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is
/// true, this returns whether \p Val can be assumed to never be a signaling
/// NaN.
bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                     bool SNaN = false);

/// Returns true if \p Val can be assumed to never be a signaling NaN.
inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
  return isKnownNeverNaN(Val, MRI, true);
}

Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);

/// Return a virtual register corresponding to the incoming argument register
/// \p PhysReg. This register is expected to have class \p RC, and optional
/// type \p RegTy. This assumes all references to the register will use the
/// same type.
///
/// If there is an existing live-in argument register, it will be returned.
/// This will also ensure there is a valid copy from \p PhysReg to the returned
/// virtual register.
Register getFunctionLiveInPhysReg(MachineFunction &MF,
                                  const TargetInstrInfo &TII,
                                  MCRegister PhysReg,
                                  const TargetRegisterClass &RC,
                                  const DebugLoc &DL, LLT RegTy = LLT());
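
// Illustrative sketch (the physical register, register class, and variable
// names are hypothetical): call-lowering code can use this to materialize an
// incoming argument register as a typed virtual register.
//
//   Register ArgVReg = getFunctionLiveInPhysReg(
//       MF, TII, AArch64::X0, AArch64::GPR64RegClass, DL, LLT::scalar(64));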

/// Return the least common multiple type of \p OrigTy and \p TargetTy, by
/// changing the number of vector elements or scalar bitwidth. The intent is a
/// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
/// \p OrigTy elements, and unmerged into \p TargetTy.
LLVM_READNONE
LLT getLCMType(LLT OrigTy, LLT TargetTy);

/// Return the smallest type that covers both \p OrigTy and \p TargetTy and is
/// a multiple of \p TargetTy.
LLVM_READNONE
LLT getCoverTy(LLT OrigTy, LLT TargetTy);

/// Return a type where the total size is the greatest common divisor of \p
/// OrigTy and \p TargetTy. This will try to either change the number of vector
/// elements, or bitwidth of scalars. The intent is the result type can be used
/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
/// with intermediate casts) can re-form \p TargetTy.
///
/// If these are vectors with different element types, this will try to produce
/// a vector with a compatible total size, but the element type of \p OrigTy. If
/// this can't be satisfied, this will produce a scalar smaller than the
/// original vector elements.
///
/// In the worst case, this returns LLT::scalar(1).
LLVM_READNONE
LLT getGCDType(LLT OrigTy, LLT TargetTy);
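
// Illustrative expected results for the type helpers above (an editor's
// sketch, not an exhaustive specification):
//
//   getLCMType(LLT::scalar(16), LLT::scalar(32))                   -> s32
//   getGCDType(LLT::scalar(32), LLT::scalar(16))                   -> s16
//   getGCDType(LLT::fixed_vector(4, 32), LLT::fixed_vector(2, 32)) -> <2 x s32>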

/// Represents a value which can be a Register or a constant.
///
/// This is useful in situations where an instruction may have an interesting
/// register operand or interesting constant operand. For a concrete example,
/// \see getVectorSplat.
class RegOrConstant {
  int64_t Cst;
  Register Reg;
  bool IsReg;

public:
  explicit RegOrConstant(Register Reg) : Reg(Reg), IsReg(true) {}
  explicit RegOrConstant(int64_t Cst) : Cst(Cst), IsReg(false) {}
  bool isReg() const { return IsReg; }
  bool isCst() const { return !IsReg; }
  Register getReg() const {
    assert(isReg() && "Expected a register!");
    return Reg;
  }
  int64_t getCst() const {
    assert(isCst() && "Expected a constant!");
    return Cst;
  }
};

/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
/// If \p MI is not a splat, returns None.
Optional<int> getSplatIndex(MachineInstr &MI);

/// \returns the scalar integral splat value of \p Reg if possible.
Optional<APInt> getIConstantSplatVal(const Register Reg,
                                     const MachineRegisterInfo &MRI);

/// \returns the scalar integral splat value defined by \p MI if possible.
Optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
                                     const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value of \p Reg if
/// possible.
Optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
                                           const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value defined by \p MI if
/// possible.
Optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
                                           const MachineRegisterInfo &MRI);

/// Returns a floating point scalar constant of a build vector splat if it
/// exists. When \p AllowUndef == true, some elements can be undef but not all.
Optional<FPValueAndVReg> getFConstantSplat(Register VReg,
                                           const MachineRegisterInfo &MRI,
                                           bool AllowUndef = true);

/// Return true if the specified register is defined by G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const Register Reg,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
bool isBuildVectorAllZeros(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI,
                           bool AllowUndef = false);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
bool isBuildVectorAllOnes(const MachineInstr &MI,
                          const MachineRegisterInfo &MRI,
                          bool AllowUndef = false);
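
// Illustrative sketch (hypothetical MIR): given
//
//   %zero:_(s32) = G_CONSTANT i32 0
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %zero, %zero, %zero, %zero
//
// isBuildVectorAllZeros(*MRI.getVRegDef(V), MRI) would be expected to return
// true, where V is the virtual register holding %v.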

/// Return true if the specified instruction is known to be a constant, or a
/// vector of constants.
///
/// If \p AllowFP is true, this will consider G_FCONSTANT in addition to
/// G_CONSTANT. If \p AllowOpaqueConstants is true, constant-like instructions
/// such as G_GLOBAL_VALUE will also be considered.
bool isConstantOrConstantVector(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowFP = true,
                                bool AllowOpaqueConstants = true);

/// Return true if the value is a constant 0 integer or a splatted vector of a
/// constant 0 integer (with no undefs if \p AllowUndefs is false). This will
/// handle G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC as truncation is not an issue
/// for null values.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI,
                       bool AllowUndefs = false);

/// Return true if the value is a constant -1 integer or a splatted vector of a
/// constant -1 integer (with no undefs if \p AllowUndefs is false).
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowUndefs = false);

/// \returns a value when \p MI is a vector splat. The splat can be either a
/// Register or a constant.
///
/// Examples:
///
/// \code
///   %reg = COPY $physreg
///   %reg_splat = G_BUILD_VECTOR %reg, %reg, ..., %reg
/// \endcode
///
/// If called on the G_BUILD_VECTOR above, this will return a RegOrConstant
/// containing %reg.
///
/// \code
///   %cst = G_CONSTANT iN 4
///   %constant_splat = G_BUILD_VECTOR %cst, %cst, ..., %cst
/// \endcode
///
/// In the above case, this will return a RegOrConstant containing 4.
Optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
                                       const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a build vector of
/// constant integers. Treats undef values as constants.
bool isConstantOrConstantVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a splat vector of
/// constant integers.
/// \returns the scalar constant or None.
Optional<APInt> isConstantOrConstantSplatVector(MachineInstr &MI,
                                                const MachineRegisterInfo &MRI);

/// Attempt to match a unary predicate against a scalar/splat constant or every
/// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source
/// value was undef.
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg,
                         std::function<bool(const Constant *ConstVal)> Match,
                         bool AllowUndefs = false);
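
// Illustrative sketch (the register name is hypothetical): matching "every
// element is a non-zero constant" over a scalar constant or constant
// G_BUILD_VECTOR source.
//
//   bool AllNonZero = matchUnaryPredicate(MRI, SrcReg, [](const Constant *C) {
//     auto *CI = dyn_cast_or_null<ConstantInt>(C);
//     return CI && !CI->isZero();
//   });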

/// Returns true if, given the TargetLowering's boolean contents information,
/// the value \p Val contains a true value.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                    bool IsFP);

/// Returns an integer representing true, as defined by the
/// TargetBooleanContents.
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);

/// Returns true if the given block should be optimized for size.
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI,
                      BlockFrequencyInfo *BFI);

using SmallInstListTy = GISelWorkList<4>;
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver,
                      SmallInstListTy &DeadInstChain);
void eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs, MachineRegisterInfo &MRI,
                 LostDebugLocObserver *LocObserver = nullptr);
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                LostDebugLocObserver *LocObserver = nullptr);
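
// Illustrative sketch (the builder and observer variables are hypothetical):
// combines typically erase the root instruction through eraseInstr so that
// feeder instructions which become trivially dead are cleaned up as well, and
// any lost debug locations are reported to the observer when one is supplied.
//
//   Builder.buildCopy(DstReg, NewSrcReg);
//   eraseInstr(MI, MRI, &LocObserver);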

} // End namespace llvm.
#endif