1 //===- FastISel.h - Definition of the FastISel class ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file defines the FastISel class.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_CODEGEN_FASTISEL_H
15 #define LLVM_CODEGEN_FASTISEL_H
16 
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/StringRef.h"
20 #include "llvm/CodeGen/MachineBasicBlock.h"
21 #include "llvm/CodeGen/TargetLowering.h"
22 #include "llvm/IR/Attributes.h"
23 #include "llvm/IR/CallSite.h"
24 #include "llvm/IR/CallingConv.h"
25 #include "llvm/IR/DebugLoc.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/InstrTypes.h"
28 #include "llvm/IR/IntrinsicInst.h"
29 #include "llvm/Support/MachineValueType.h"
30 #include <algorithm>
#include <cstdint>
#include <limits>
32 #include <utility>
33 
34 namespace llvm {
35 
36 class AllocaInst;
37 class BasicBlock;
38 class CallInst;
39 class Constant;
40 class ConstantFP;
41 class DataLayout;
42 class FunctionLoweringInfo;
43 class LoadInst;
44 class MachineConstantPool;
45 class MachineFrameInfo;
46 class MachineFunction;
47 class MachineInstr;
48 class MachineMemOperand;
49 class MachineOperand;
50 class MachineRegisterInfo;
51 class MCContext;
52 class MCInstrDesc;
53 class MCSymbol;
54 class TargetInstrInfo;
55 class TargetLibraryInfo;
56 class TargetMachine;
57 class TargetRegisterClass;
58 class TargetRegisterInfo;
59 class Type;
60 class User;
61 class Value;
62 
63 /// This is a fast-path instruction selection class that generates poor
64 /// code and doesn't support illegal types or non-trivial lowering, but runs
65 /// quickly.
66 class FastISel {
67 public:
68   using ArgListEntry = TargetLoweringBase::ArgListEntry;
69   using ArgListTy = TargetLoweringBase::ArgListTy;
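
  /// Argument and result bookkeeping used when lowering a call through
  /// fastLowerCall / lowerCallTo. The setCallee overloads form a small
  /// builder-style interface; an illustrative use from a target's call
  /// lowering (RetTy, FTy, CalledValue, Args, and CS are placeholders) is:
  ///
  ///   CallLoweringInfo CLI;
  ///   CLI.setCallee(RetTy, FTy, CalledValue, std::move(Args), CS)
  ///      .setTailCall(false);
  ///   if (!lowerCallTo(CLI))
  ///     return false;
  ///   // On success, CLI.ResultReg holds the call's result, if any.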
70   struct CallLoweringInfo {
71     Type *RetTy = nullptr;
72     bool RetSExt : 1;
73     bool RetZExt : 1;
74     bool IsVarArg : 1;
75     bool IsInReg : 1;
76     bool DoesNotReturn : 1;
77     bool IsReturnValueUsed : 1;
78     bool IsPatchPoint : 1;
79 
    // IsTailCall should be modified by implementations of fastLowerCall
    // that perform tail call conversions.
82     bool IsTailCall = false;
83 
84     unsigned NumFixedArgs = -1;
85     CallingConv::ID CallConv = CallingConv::C;
86     const Value *Callee = nullptr;
87     MCSymbol *Symbol = nullptr;
88     ArgListTy Args;
89     ImmutableCallSite *CS = nullptr;
90     MachineInstr *Call = nullptr;
91     unsigned ResultReg = 0;
92     unsigned NumResultRegs = 0;
93 
94     SmallVector<Value *, 16> OutVals;
95     SmallVector<ISD::ArgFlagsTy, 16> OutFlags;
96     SmallVector<Register, 16> OutRegs;
97     SmallVector<ISD::InputArg, 4> Ins;
98     SmallVector<Register, 4> InRegs;
99 
100     CallLoweringInfo()
101         : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
102           DoesNotReturn(false), IsReturnValueUsed(true), IsPatchPoint(false) {}
103 
104     CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
105                                 const Value *Target, ArgListTy &&ArgsList,
106                                 ImmutableCallSite &Call) {
107       RetTy = ResultTy;
108       Callee = Target;
109 
110       IsInReg = Call.hasRetAttr(Attribute::InReg);
111       DoesNotReturn = Call.doesNotReturn();
112       IsVarArg = FuncTy->isVarArg();
113       IsReturnValueUsed = !Call.getInstruction()->use_empty();
114       RetSExt = Call.hasRetAttr(Attribute::SExt);
115       RetZExt = Call.hasRetAttr(Attribute::ZExt);
116 
117       CallConv = Call.getCallingConv();
118       Args = std::move(ArgsList);
119       NumFixedArgs = FuncTy->getNumParams();
120 
121       CS = &Call;
122 
123       return *this;
124     }
125 
126     CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
127                                 MCSymbol *Target, ArgListTy &&ArgsList,
128                                 ImmutableCallSite &Call,
129                                 unsigned FixedArgs = ~0U) {
130       RetTy = ResultTy;
131       Callee = Call.getCalledValue();
132       Symbol = Target;
133 
134       IsInReg = Call.hasRetAttr(Attribute::InReg);
135       DoesNotReturn = Call.doesNotReturn();
136       IsVarArg = FuncTy->isVarArg();
137       IsReturnValueUsed = !Call.getInstruction()->use_empty();
138       RetSExt = Call.hasRetAttr(Attribute::SExt);
139       RetZExt = Call.hasRetAttr(Attribute::ZExt);
140 
141       CallConv = Call.getCallingConv();
142       Args = std::move(ArgsList);
143       NumFixedArgs = (FixedArgs == ~0U) ? FuncTy->getNumParams() : FixedArgs;
144 
145       CS = &Call;
146 
147       return *this;
148     }
149 
150     CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
151                                 const Value *Target, ArgListTy &&ArgsList,
152                                 unsigned FixedArgs = ~0U) {
153       RetTy = ResultTy;
154       Callee = Target;
155       CallConv = CC;
156       Args = std::move(ArgsList);
157       NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
158       return *this;
159     }
160 
161     CallLoweringInfo &setCallee(const DataLayout &DL, MCContext &Ctx,
162                                 CallingConv::ID CC, Type *ResultTy,
163                                 StringRef Target, ArgListTy &&ArgsList,
164                                 unsigned FixedArgs = ~0U);
165 
166     CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
167                                 MCSymbol *Target, ArgListTy &&ArgsList,
168                                 unsigned FixedArgs = ~0U) {
169       RetTy = ResultTy;
170       Symbol = Target;
171       CallConv = CC;
172       Args = std::move(ArgsList);
173       NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
174       return *this;
175     }
176 
177     CallLoweringInfo &setTailCall(bool Value = true) {
178       IsTailCall = Value;
179       return *this;
180     }
181 
182     CallLoweringInfo &setIsPatchPoint(bool Value = true) {
183       IsPatchPoint = Value;
184       return *this;
185     }
186 
187     ArgListTy &getArgs() { return Args; }
188 
189     void clearOuts() {
190       OutVals.clear();
191       OutFlags.clear();
192       OutRegs.clear();
193     }
194 
195     void clearIns() {
196       Ins.clear();
197       InRegs.clear();
198     }
199   };
200 
201 protected:
202   DenseMap<const Value *, unsigned> LocalValueMap;
203   FunctionLoweringInfo &FuncInfo;
204   MachineFunction *MF;
205   MachineRegisterInfo &MRI;
206   MachineFrameInfo &MFI;
207   MachineConstantPool &MCP;
208   DebugLoc DbgLoc;
209   const TargetMachine &TM;
210   const DataLayout &DL;
211   const TargetInstrInfo &TII;
212   const TargetLowering &TLI;
213   const TargetRegisterInfo &TRI;
214   const TargetLibraryInfo *LibInfo;
215   bool SkipTargetIndependentISel;
216 
  /// The position of the last instruction emitted for materializing constants
  /// for use in the current block. It resets to EmitStartPt when it makes
  /// sense (for example, it is usually profitable to avoid having a function
  /// call between a constant's definition and its use).
221   MachineInstr *LastLocalValue;
222 
  /// The topmost instruction in the current block at which local values may
  /// be emitted. LastLocalValue resets to EmitStartPt when it makes sense
  /// (for example, on function calls).
226   MachineInstr *EmitStartPt;
227 
228   /// Last local value flush point. On a subsequent flush, no local value will
229   /// sink past this point.
230   MachineBasicBlock::iterator LastFlushPoint;
231 
232 public:
233   virtual ~FastISel();
234 
235   /// Return the position of the last instruction emitted for
236   /// materializing constants for use in the current block.
237   MachineInstr *getLastLocalValue() { return LastLocalValue; }
238 
239   /// Update the position of the last instruction emitted for
240   /// materializing constants for use in the current block.
241   void setLastLocalValue(MachineInstr *I) {
242     EmitStartPt = I;
243     LastLocalValue = I;
244   }
245 
246   /// Set the current block to which generated machine instructions will
247   /// be appended.
248   void startNewBlock();
249 
250   /// Flush the local value map and sink local values if possible.
251   void finishBasicBlock();
252 
253   /// Return current debug location information.
254   DebugLoc getCurDebugLoc() const { return DbgLoc; }
255 
256   /// Do "fast" instruction selection for function arguments and append
257   /// the machine instructions to the current block. Returns true when
258   /// successful.
259   bool lowerArguments();
260 
261   /// Do "fast" instruction selection for the given LLVM IR instruction
262   /// and append the generated machine instructions to the current block.
263   /// Returns true if selection was successful.
264   bool selectInstruction(const Instruction *I);
265 
266   /// Do "fast" instruction selection for the given LLVM IR operator
267   /// (Instruction or ConstantExpr), and append generated machine instructions
268   /// to the current block. Return true if selection was successful.
269   bool selectOperator(const User *I, unsigned Opcode);
270 
271   /// Create a virtual register and arrange for it to be assigned the
272   /// value for the given LLVM value.
273   unsigned getRegForValue(const Value *V);
274 
  /// Look up the value to see if it is already cached in a register. It may
  /// be defined by instructions across blocks or defined locally.
278   unsigned lookUpRegForValue(const Value *V);
279 
280   /// This is a wrapper around getRegForValue that also takes care of
281   /// truncating or sign-extending the given getelementptr index value.
282   std::pair<unsigned, bool> getRegForGEPIndex(const Value *Idx);
283 
  /// We're checking to see if we can fold \p LI into \p FoldInst. Note
  /// that we could have a sequence where multiple LLVM IR instructions are
  /// folded into the same MachineInstr. For example we could have:
287   ///
288   ///   A: x = load i32 *P
289   ///   B: y = icmp A, 42
290   ///   C: br y, ...
291   ///
292   /// In this scenario, \p LI is "A", and \p FoldInst is "C".  We know about "B"
293   /// (and any other folded instructions) because it is between A and C.
294   ///
  /// Returns true if folding succeeded.
296   bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst);
297 
  /// The specified machine instr operand is a vreg, and that vreg is
  /// being provided by the specified load instruction. If possible, try to
  /// fold the load as an operand to the instruction, returning true on
  /// success.
302   ///
303   /// This method should be implemented by targets.
304   virtual bool tryToFoldLoadIntoMI(MachineInstr * /*MI*/, unsigned /*OpNo*/,
305                                    const LoadInst * /*LI*/) {
306     return false;
307   }
308 
309   /// Reset InsertPt to prepare for inserting instructions into the
310   /// current block.
311   void recomputeInsertPt();
312 
  /// Remove all dead instructions between \p I and \p E.
314   void removeDeadCode(MachineBasicBlock::iterator I,
315                       MachineBasicBlock::iterator E);
316 
317   struct SavePoint {
318     MachineBasicBlock::iterator InsertPt;
319     DebugLoc DL;
320   };
321 
322   /// Prepare InsertPt to begin inserting instructions into the local
323   /// value area and return the old insert position.
324   SavePoint enterLocalValueArea();
325 
326   /// Reset InsertPt to the given old insert position.
327   void leaveLocalValueArea(SavePoint Old);
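
  // Illustrative save/restore pattern for emitting into the local value
  // area (a sketch; the materialization step in the middle is hypothetical):
  //
  //   SavePoint SP = enterLocalValueArea();
  //   unsigned Reg = fastMaterializeConstant(C); // emitted at the local
  //                                              // value insertion point
  //   leaveLocalValueArea(SP);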
328 
329 protected:
330   explicit FastISel(FunctionLoweringInfo &FuncInfo,
331                     const TargetLibraryInfo *LibInfo,
332                     bool SkipTargetIndependentISel = false);
333 
334   /// This method is called by target-independent code when the normal
335   /// FastISel process fails to select an instruction. This gives targets a
336   /// chance to emit code for anything that doesn't fit into FastISel's
337   /// framework. It returns true if it was successful.
338   virtual bool fastSelectInstruction(const Instruction *I) = 0;
339 
340   /// This method is called by target-independent code to do target-
341   /// specific argument lowering. It returns true if it was successful.
342   virtual bool fastLowerArguments();
343 
344   /// This method is called by target-independent code to do target-
345   /// specific call lowering. It returns true if it was successful.
346   virtual bool fastLowerCall(CallLoweringInfo &CLI);
347 
348   /// This method is called by target-independent code to do target-
349   /// specific intrinsic lowering. It returns true if it was successful.
350   virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II);
351 
352   /// This method is called by target-independent code to request that an
353   /// instruction with the given type and opcode be emitted.
354   virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
355 
356   /// This method is called by target-independent code to request that an
357   /// instruction with the given type, opcode, and register operand be emitted.
358   virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
359                               bool Op0IsKill);
360 
361   /// This method is called by target-independent code to request that an
362   /// instruction with the given type, opcode, and register operands be emitted.
363   virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
364                                bool Op0IsKill, unsigned Op1, bool Op1IsKill);
365 
366   /// This method is called by target-independent code to request that an
367   /// instruction with the given type, opcode, and register and immediate
368   /// operands be emitted.
369   virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
370                                bool Op0IsKill, uint64_t Imm);
371 
372   /// This method is a wrapper of fastEmit_ri.
373   ///
  /// It first tries to emit an instruction with an immediate operand using
  /// fastEmit_ri. If that fails, it materializes the immediate into a register
  /// and tries fastEmit_rr instead.
377   unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
378                         uint64_t Imm, MVT ImmType);
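
  // For example, a target's shift selection might call (illustrative; the
  // operand register and shift amount are placeholders):
  //
  //   unsigned ResultReg =
  //       fastEmit_ri_(MVT::i32, ISD::SHL, Op0Reg, Op0IsKill, ShAmt, MVT::i8);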
379 
380   /// This method is called by target-independent code to request that an
381   /// instruction with the given type, opcode, and immediate operand be emitted.
382   virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);
383 
384   /// This method is called by target-independent code to request that an
385   /// instruction with the given type, opcode, and floating-point immediate
386   /// operand be emitted.
387   virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
388                               const ConstantFP *FPImm);
389 
390   /// Emit a MachineInstr with no operands and a result register in the
391   /// given register class.
392   unsigned fastEmitInst_(unsigned MachineInstOpcode,
393                          const TargetRegisterClass *RC);
394 
395   /// Emit a MachineInstr with one register operand and a result register
396   /// in the given register class.
397   unsigned fastEmitInst_r(unsigned MachineInstOpcode,
398                           const TargetRegisterClass *RC, unsigned Op0,
399                           bool Op0IsKill);
400 
401   /// Emit a MachineInstr with two register operands and a result
402   /// register in the given register class.
403   unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
404                            const TargetRegisterClass *RC, unsigned Op0,
405                            bool Op0IsKill, unsigned Op1, bool Op1IsKill);
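
  // Illustrative use from a target backend (XYZ::ADDrr and
  // &XYZ::GR32RegClass are hypothetical placeholders for a real target's
  // opcode and register class):
  //
  //   unsigned SumReg = fastEmitInst_rr(XYZ::ADDrr, &XYZ::GR32RegClass,
  //                                     LHSReg, LHSIsKill, RHSReg, RHSIsKill);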
406 
407   /// Emit a MachineInstr with three register operands and a result
408   /// register in the given register class.
409   unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
410                             const TargetRegisterClass *RC, unsigned Op0,
411                             bool Op0IsKill, unsigned Op1, bool Op1IsKill,
412                             unsigned Op2, bool Op2IsKill);
413 
414   /// Emit a MachineInstr with a register operand, an immediate, and a
415   /// result register in the given register class.
416   unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
417                            const TargetRegisterClass *RC, unsigned Op0,
418                            bool Op0IsKill, uint64_t Imm);
419 
  /// Emit a MachineInstr with one register operand, two immediate operands,
  /// and a result register in the given register class.
422   unsigned fastEmitInst_rii(unsigned MachineInstOpcode,
423                             const TargetRegisterClass *RC, unsigned Op0,
424                             bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
425 
  /// Emit a MachineInstr with a floating-point immediate, and a result
427   /// register in the given register class.
428   unsigned fastEmitInst_f(unsigned MachineInstOpcode,
429                           const TargetRegisterClass *RC,
430                           const ConstantFP *FPImm);
431 
432   /// Emit a MachineInstr with two register operands, an immediate, and a
433   /// result register in the given register class.
434   unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
435                             const TargetRegisterClass *RC, unsigned Op0,
436                             bool Op0IsKill, unsigned Op1, bool Op1IsKill,
437                             uint64_t Imm);
438 
439   /// Emit a MachineInstr with a single immediate operand, and a result
440   /// register in the given register class.
441   unsigned fastEmitInst_i(unsigned MachineInstOpcode,
442                           const TargetRegisterClass *RC, uint64_t Imm);
443 
444   /// Emit a MachineInstr for an extract_subreg from a specified index of
445   /// a superregister to a specified type.
446   unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
447                                       uint32_t Idx);
448 
449   /// Emit MachineInstrs to compute the value of Op with all but the
450   /// least significant bit set to zero.
451   unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);
452 
453   /// Emit an unconditional branch to the given block, unless it is the
454   /// immediate (fall-through) successor, and update the CFG.
455   void fastEmitBranch(MachineBasicBlock *MSucc, const DebugLoc &DbgLoc);
456 
  /// Emit an unconditional branch to \p FalseMBB, obtain the branch weight,
  /// and add \p TrueMBB and \p FalseMBB to the successor list.
459   void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB,
460                         MachineBasicBlock *FalseMBB);
461 
  /// Update the value map to include the new mapping for this
  /// instruction, or insert an extra copy to get the result in a previously
  /// determined register.
465   ///
466   /// NOTE: This is only necessary because we might select a block that uses a
467   /// value before we select the block that defines the value. It might be
468   /// possible to fix this by selecting blocks in reverse postorder.
469   void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs = 1);
470 
471   unsigned createResultReg(const TargetRegisterClass *RC);
472 
473   /// Try to constrain Op so that it is usable by argument OpNum of the
474   /// provided MCInstrDesc. If this fails, create a new virtual register in the
475   /// correct class and COPY the value there.
476   unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
477                                     unsigned OpNum);
478 
479   /// Emit a constant in a register using target-specific logic, such as
480   /// constant pool loads.
481   virtual unsigned fastMaterializeConstant(const Constant *C) { return 0; }
482 
483   /// Emit an alloca address in a register using target-specific logic.
484   virtual unsigned fastMaterializeAlloca(const AllocaInst *C) { return 0; }
485 
486   /// Emit the floating-point constant +0.0 in a register using target-
487   /// specific logic.
488   virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF) {
489     return 0;
490   }
491 
492   /// Check if \c Add is an add that can be safely folded into \c GEP.
493   ///
494   /// \c Add can be folded into \c GEP if:
495   /// - \c Add is an add,
496   /// - \c Add's size matches \c GEP's,
497   /// - \c Add is in the same basic block as \c GEP, and
498   /// - \c Add has a constant operand.
499   bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
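
  // For example (illustrative IR), the add below could be folded into the
  // GEP because it has a constant operand and is in the same basic block:
  //
  //   %sum = add i64 %idx, 16
  //   %gep = getelementptr i32, i32* %base, i64 %sum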
500 
501   /// Test whether the given value has exactly one use.
502   bool hasTrivialKill(const Value *V);
503 
504   /// Create a machine mem operand from the given instruction.
505   MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;
506 
507   CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const;
508 
509   bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs);
510   bool lowerCallTo(const CallInst *CI, const char *SymName,
511                    unsigned NumArgs);
512   bool lowerCallTo(CallLoweringInfo &CLI);
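
  // Illustrative: lowering a call to a named external routine (the symbol
  // name and argument count here are only an example):
  //
  //   if (!lowerCallTo(CI, "memset", /*NumArgs=*/3))
  //     return false;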
513 
514   bool isCommutativeIntrinsic(IntrinsicInst const *II) {
515     switch (II->getIntrinsicID()) {
516     case Intrinsic::sadd_with_overflow:
517     case Intrinsic::uadd_with_overflow:
518     case Intrinsic::smul_with_overflow:
519     case Intrinsic::umul_with_overflow:
520       return true;
521     default:
522       return false;
523     }
524   }
525 
526   bool lowerCall(const CallInst *I);
  /// Select and emit code for a binary operator instruction, whose opcode
  /// directly corresponds to the given ISD opcode.
529   bool selectBinaryOp(const User *I, unsigned ISDOpcode);
530   bool selectFNeg(const User *I, const Value *In);
531   bool selectGetElementPtr(const User *I);
532   bool selectStackmap(const CallInst *I);
533   bool selectPatchpoint(const CallInst *I);
534   bool selectCall(const User *I);
535   bool selectIntrinsicCall(const IntrinsicInst *II);
536   bool selectBitCast(const User *I);
537   bool selectCast(const User *I, unsigned Opcode);
538   bool selectExtractValue(const User *U);
539   bool selectInsertValue(const User *I);
540   bool selectXRayCustomEvent(const CallInst *II);
541   bool selectXRayTypedEvent(const CallInst *II);
542 
543   bool shouldOptForSize(const MachineFunction *MF) const {
544     // TODO: Implement PGSO.
545     return MF->getFunction().hasOptSize();
546   }
547 
548 private:
549   /// Handle PHI nodes in successor blocks.
550   ///
551   /// Emit code to ensure constants are copied into registers when needed.
552   /// Remember the virtual registers that need to be added to the Machine PHI
553   /// nodes as input.  We cannot just directly add them, because expansion might
554   /// result in multiple MBB's for one BB.  As such, the start of the BB might
555   /// correspond to a different MBB than the end.
556   bool handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
557 
558   /// Helper for materializeRegForValue to materialize a constant in a
559   /// target-independent way.
560   unsigned materializeConstant(const Value *V, MVT VT);
561 
  /// Helper for getRegForValue. This function is called when the value
563   /// isn't already available in a register and must be materialized with new
564   /// instructions.
565   unsigned materializeRegForValue(const Value *V, MVT VT);
566 
  /// Clears LocalValueMap and moves the area for new local values to the
  /// beginning of the block. This helps avoid spilling cached values across
  /// heavy instructions like calls.
570   void flushLocalValueMap();
571 
  /// Removes dead local value instructions after SavedLastLocalValue.
573   void removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue);
574 
575   struct InstOrderMap {
576     DenseMap<MachineInstr *, unsigned> Orders;
577     MachineInstr *FirstTerminator = nullptr;
578     unsigned FirstTerminatorOrder = std::numeric_limits<unsigned>::max();
579 
580     void initialize(MachineBasicBlock *MBB,
581                     MachineBasicBlock::iterator LastFlushPoint);
582   };
583 
584   /// Sinks the local value materialization instruction LocalMI to its first use
585   /// in the basic block, or deletes it if it is not used.
586   void sinkLocalValueMaterialization(MachineInstr &LocalMI, unsigned DefReg,
587                                      InstOrderMap &OrderMap);
588 
589   /// Insertion point before trying to select the current instruction.
590   MachineBasicBlock::iterator SavedInsertPt;
591 
592   /// Add a stackmap or patchpoint intrinsic call's live variable
593   /// operands to a stackmap or patchpoint machine instruction.
594   bool addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
595                            const CallInst *CI, unsigned StartIdx);
596   bool lowerCallOperands(const CallInst *CI, unsigned ArgIdx, unsigned NumArgs,
597                          const Value *Callee, bool ForceRetVoidTy,
598                          CallLoweringInfo &CLI);
599 };
600 
601 } // end namespace llvm
602 
603 #endif // LLVM_CODEGEN_FASTISEL_H
604