//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
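  // For illustration only (not part of the node's contract): under the ELF
  // TLSDesc model, the sequence materialised for a variable "var" typically
  // looks like
  //   adrp x0, :tlsdesc:var
  //   ldr  x1, [x0, #:tlsdesc_lo12:var]
  //   add  x0, x0, #:tlsdesc_lo12:var
  //   .tlsdesccall var
  //   blr  x1
  // leaving the thread-pointer offset of "var" in X0.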
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADR,      // PC-relative address of a TargetGlobalAddress operand (ADR).
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
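  // As a sketch of how ADRP/ADDlow pair up (assuming the small code model),
  // materialising the address of a global "g" looks roughly like
  //   adrp x0, g            // 4KB-page address of g
  //   add  x0, x0, :lo12:g  // plus the low 12 bits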
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,     // Conditional select.
  FCSEL,    // Conditional move instruction.
  CSINV,    // Conditional select invert.
  CSNEG,    // Conditional select negate.
  CSINC,    // Conditional select increment.
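  // A rough guide to this family, mirroring the underlying A64 instructions
  // (cond is an NZCV condition code):
  //   csel  Wd, Wn, Wm, cond   // Wd = cond ? Wn : Wm
  //   csinc Wd, Wn, Wm, cond   // Wd = cond ? Wn : Wm + 1
  //   csinv Wd, Wn, Wm, cond   // Wd = cond ? Wn : ~Wm
  //   csneg Wd, Wn, Wm, cond   // Wd = cond ? Wn : -Wm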

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,

  // Add/subtract with carry (adc, sbc instructions).
  ADC,
  SBC,

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left, right, falsecc, cc, flags.
  CCMP,
  CCMN,
  FCCMP,

  // Floating-point comparison.
  FCMP,

  // Scalar extract.
  EXTR,

  // Scalar-to-vector duplication.
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves.
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops.
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,
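  // A sketch of the intended semantics (matching NEON's bsl, which takes the
  // mask in the destination register): BSL(mask, lhs, rhs) computes, bitwise,
  //   (mask & lhs) | (~mask & rhs)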

  // Vector arithmetic negation.
  NEG,

  // Vector shuffles.
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,
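  // For example, on an illustrative 4-lane vector with a = [a0 a1 a2 a3] and
  // b = [b0 b1 b2 b3]:
  //   ZIP1(a, b) = [a0 b0 a1 b1]   ZIP2(a, b) = [a2 b2 a3 b3]
  //   UZP1(a, b) = [a0 a2 b0 b2]   TRN1(a, b) = [a0 b0 a2 b2]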

  // Vector shift by scalar.
  VSHL,
  VLSHR,
  VASHR,

  // Vector saturating/rounding shift by immediate.
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,
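  // For instance, the immediate forms correspond to instructions such as
  //   sqshl v0.4s, v1.4s, #3   // signed saturating shift left by 3
  //   urshr v0.4s, v1.4s, #2   // unsigned rounding shift right by 2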

  // Vector comparisons.
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector comparisons against zero.
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition.
  // Only the lower result lane is defined.
  SADDV,
  UADDV,
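  // By way of example, UADDV of a v4i32 input would typically select to
  //   addv s0, v0.4s
  // with lanes 1-3 of the result left undefined, as noted above.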

  // Vector across-lanes min/max.
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // SVE predicated across-lanes reductions.
  SMAXV_PRED,
  UMAXV_PRED,
  SMINV_PRED,
  UMINV_PRED,
  ORV_PRED,
  EORV_PRED,
  ANDV_PRED,

  // Vector bitwise negation.
  NOT,

  // Vector bitwise selection.
  BIT,

  // Compare-and-branch.
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls.
  TC_RETURN,

  // Custom prefetch handling.
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  SMULL,
  UMULL,

  // Reciprocal estimates and steps.
  FRECPE, FRECPS,
  FRSQRTE, FRSQRTS,

  // SVE unpack (sign/zero-extend the low or high half of a vector).
  SUNPKHI,
  SUNPKLO,
  UUNPKHI,
  UUNPKLO,
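  // As a concrete illustration, SUNPKLO on byte elements corresponds to
  //   sunpklo z0.h, z1.b
  // which sign-extends the low half of z1's byte lanes into halfword lanes.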

  CLASTA_N,
  CLASTB_N,
  LASTA,
  LASTB,
  REV,
  TBL,

  INSR,
  PTEST,
  PTRUE,

  // Unsigned gather loads.
  GLD1,
  GLD1_SCALED,
  GLD1_UXTW,
  GLD1_SXTW,
  GLD1_UXTW_SCALED,
  GLD1_SXTW_SCALED,
  GLD1_IMM,
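  // The suffixes mirror SVE's gather addressing modes; e.g. GLD1_SXTW_SCALED
  // with 32-bit elements would correspond to something like
  //   ld1w { z0.s }, p0/z, [x0, z1.s, sxtw #2]
  // (sign-extended 32-bit offsets, scaled by the element size).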

  // Signed gather loads.
  GLD1S,
  GLD1S_SCALED,
  GLD1S_UXTW,
  GLD1S_SXTW,
  GLD1S_UXTW_SCALED,
  GLD1S_SXTW_SCALED,
  GLD1S_IMM,

  // Scatter stores.
  SST1,
  SST1_SCALED,
  SST1_UXTW,
  SST1_SXTW,
  SST1_UXTW_SCALED,
  SST1_SXTW_SCALED,
  SST1_IMM,

  // Strict (exception-raising) floating-point comparison.
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPE,

  // NEON load/store with post-increment base updates.
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost,
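  // For example, a post-incremented structure load such as
  //   ld2 { v0.4s, v1.4s }, [x0], #32
  // bumps the base register by the 32 bytes transferred, which is what these
  // *post nodes model.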

  // Memory Tagging Extension (MTE) tag-setting stores. The STZ* forms also
  // zero the memory being tagged.
  STG,
  STZG,
  ST2G,
  STZ2G,

  // Load/store pair.
  LDP,
  STP
};

} // end namespace AArch64ISD

namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg;
}
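
// A sketch of why this matters (illustrative, not exhaustive): a 32-bit
// "add w0, w1, w2" implicitly zeroes bits [63:32] of x0, so zero-extending
// such a result from i32 to i64 needs no extra instruction and can be folded
// into a plain subregister-to-register copy.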

} // end anonymous namespace

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for return values of a given
  /// CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
    // Returning i64 unconditionally here (i.e. even for ILP32) means that the
    // *DAG* representation of pointers will always be 64-bits. They will be
    // truncated and extended when transferred to memory, but the 64-bit DAG
    // allows us to use AArch64's addressing modes much more easily.
    return MVT::getIntegerVT(64);
  }

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;
  /// LLT variant.
  bool allowsMisalignedMemoryAccesses(
    LLT Ty, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
    bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;

  bool hasPairedLoad(EVT LoadedType,
                     unsigned &RequiredAlignment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;
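
  // Roughly speaking (a sketch of the IR pattern, not the exact matcher): a
  // factor-2 interleaved load of <8 x i32>, deinterleaved by two
  // shufflevectors with masks <0,2,4,6> and <1,3,5,7>, can be lowered to a
  // single
  //   ld2 { v0.4s, v1.4s }, [x0]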

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          const AttributeList &FuncAttributes) const override;

  LLT getOptimalMemOpLLT(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                         bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                         const AttributeList &FuncAttributes) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this
  /// method returns true; otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *Ty) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge stores up to a float value size (128 bits) if the
    // NoImplicitFloat attribute is set.
    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
        Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    return VT.getSizeInBits() >= 64; // vector 'bic'
  }
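
  // Illustratively, the scalar case can use the flag-setting bics, e.g.
  //   bics wzr, w0, w1   // set NZCV from w0 & ~w1
  // while vectors get a plain bic, e.g. bic v0.16b, v1.16b, v2.16b.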

  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are OK with KeptBitsVT being byte/word/dword, i.e. what SXT supports.
    // XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }
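
  // To sketch the payoff: a "does x fit in i8?" range check on an i32 value
  // can then be selected as
  //   sxtb w8, w0
  //   cmp  w8, w0
  // i.e. x is compared against its own sign-extended truncation.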

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSLocalExec(const GlobalValue *GV, SDValue ThreadBase,
                               const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
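
  // As a usage sketch (hypothetical user code, shown for context): "Q" names
  // a memory operand addressed by a single base register with no offset,
  // which is what exclusive loads/stores require, e.g.
  //   uint64_t v;
  //   asm volatile("ldxr %0, %1" : "=r"(v) : "Q"(*ptr));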

  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif