//===-- RISCVISelLowering.h - RISCV DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H

#include "RISCV.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
class RISCVSubtarget;
struct RISCVRegisterInfo;
namespace RISCVISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  RET_FLAG,
  URET_FLAG,
  SRET_FLAG,
  MRET_FLAG,
  CALL,
  /// Select with condition operator - This selects between a true value and
  /// a false value (ops #3 and #4) based on the boolean result of comparing
  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
  /// condition code in op #2, an XLenVT constant from the ISD::CondCode enum.
  /// The lhs and rhs are XLenVT integers. The true and false values can be
  /// integer or floating point.
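  /// For example, a select on i32 values might be represented as the
  /// (illustrative) node (select_cc lhs, rhs, setlt, truev, falsev).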
  SELECT_CC,
  BR_CC,
  BuildPairF64,
  SplitF64,
  TAIL,
  // Multiply high for signed x unsigned.
  MULHSU,
  // RV64I shifts, directly matching the semantics of the named RISC-V
  // instructions.
  SLLW,
  SRAW,
  SRLW,
  // 32-bit operations from RV64M that can't be simply matched with a pattern
  // at instruction selection time. These have undefined behavior for division
  // by 0 or overflow (divw), like their target-independent counterparts.
  DIVW,
  DIVUW,
  REMUW,
  // RV64IB rotates, directly matching the semantics of the named RISC-V
  // instructions.
  ROLW,
  RORW,
  // RV64IZbb bit counting instructions directly matching the semantics of the
  // named RISC-V instructions.
  CLZW,
  CTZW,
  // RV64IB/RV32IB funnel shifts, with the semantics of the named RISC-V
  // instructions, but the same operand order as fshl/fshr intrinsics.
  FSR,
  FSL,
  // RV64IB funnel shifts, with the semantics of the named RISC-V instructions,
  // but the same operand order as fshl/fshr intrinsics.
  FSRW,
  FSLW,
  // FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
  // XLEN is the only legal integer width.
  //
  // FMV_H_X matches the semantics of the FMV.H.X instruction.
  // FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result.
  // FMV_W_X_RV64 matches the semantics of the FMV.W.X instruction.
  // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
  //
  // These are more convenient semantics for writing DAG combines that remove
  // unnecessary GPR->FPR->GPR moves.
  FMV_H_X,
  FMV_X_ANYEXTH,
  FMV_W_X_RV64,
  FMV_X_ANYEXTW_RV64,
  // FP to 32-bit int conversions for RV64. These are used to keep track of
  // the result being sign extended to 64 bits.
  FCVT_W_RV64,
  FCVT_WU_RV64,
  // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
  // (returns (Lo, Hi)). It takes a chain operand.
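  // This is expanded late into (roughly) the standard sequence the ISA manual
  // recommends for reading a 64-bit counter on RV32:
  //   again: rdcycleh hi; rdcycle lo; rdcycleh t; bne hi, t, again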
  READ_CYCLE_WIDE,
  // Generalized Reverse and Generalized Or-Combine - directly matching the
  // semantics of the named RISC-V instructions. Lowered as custom nodes as
  // TableGen chokes when faced with commutative permutations in deeply-nested
  // DAGs. Each node takes an input operand and a control operand and outputs a
  // bit-manipulated version of the input. All operands are i32 or XLenVT.
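  // For example (illustrative): on RV32, (GREV x, 24) byte-reverses x (the
  // rev8 operation), and (GORC x, 7) sets each byte of x to all-ones if any
  // bit in that byte is set (the orc.b operation).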
  GREV,
  GREVW,
  GORC,
  GORCW,
  SHFL,
  SHFLW,
  UNSHFL,
  UNSHFLW,
  // Bit Compress/Decompress implement the generic bit extract and bit deposit
  // functions. This operation is also referred to as bit gather/scatter, bit
  // pack/unpack, parallel extract/deposit, compress/expand, or right
  // compress/right expand.
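  // For example (illustrative): with mask 0b0101, bcompress packs source bits
  // 0 and 2 into result bits 0 and 1, so bcompress(0b1110, 0b0101) == 0b0010;
  // bdecompress performs the inverse deposit, so
  // bdecompress(0b0010, 0b0101) == 0b0100.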
  BCOMPRESS,
  BCOMPRESSW,
  BDECOMPRESS,
  BDECOMPRESSW,
  // Vector Extension
  // VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand
  // for the VL value to be used for the operation.
  VMV_V_X_VL,
  // VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra
  // operand for the VL value to be used for the operation.
  VFMV_V_F_VL,
  // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign
  // extended from the vector element size.
  VMV_X_S,
  // VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand.
  VMV_S_X_VL,
  // VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand.
  VFMV_S_F_VL,
  // Splats an i64 scalar to a vector type (with element type i64) where the
  // scalar is a sign-extended i32.
  SPLAT_VECTOR_I64,
  // Splats a 64-bit value that has been split into two i32 parts. This is
  // expanded late to two scalar stores and a stride-0 vector load.
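  // Illustratively, the late expansion is roughly (assembly sketch):
  //   sw lo, 0(sp); sw hi, 4(sp); vlse64.v vd, (sp), zero
  // where the zero stride register makes every element read the same 64 bits.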
  SPLAT_VECTOR_SPLIT_I64_VL,
  // Read VLENB CSR
  READ_VLENB,
  // Truncates an RVV integer vector by one power-of-two. Carries both an extra
  // mask and VL operand.
  TRUNCATE_VECTOR_VL,
  // Matches the semantics of vslideup/vslidedown. The first operand is the
  // pass-thru operand, the second is the source vector, the third is the
  // XLenVT index (either constant or non-constant), the fourth is the mask
  // and the fifth the VL.
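  // For example (illustrative): (vslideup_vl passthru, src, idx, mask, vl).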
  VSLIDEUP_VL,
  VSLIDEDOWN_VL,
  // Matches the semantics of vslide1up/vslide1down. The first operand is the
  // source vector, the second is the XLenVT scalar value. The third and fourth
  // operands are the mask and VL operands.
  VSLIDE1UP_VL,
  VSLIDE1DOWN_VL,
  // Matches the semantics of the vid.v instruction, with a mask and VL
  // operand.
  VID_VL,
  // Matches the semantics of the vfncvt.rod instruction (convert double-width
  // float to single-width float, rounding towards odd). Takes a double-width
  // float vector and produces a single-width float vector. Also has a mask and
  // VL operand.
  VFNCVT_ROD_VL,
  // These nodes match the semantics of the corresponding RVV vector reduction
  // instructions. They produce a vector result which is the reduction
  // performed over the first vector operand plus the first element of the
  // second vector operand. The first operand is an unconstrained vector type,
  // and the result and second operand's types are expected to be the
  // corresponding full-width LMUL=1 type for the first operand:
  //   nxv8i8 = vecreduce_add nxv32i8, nxv8i8
  //   nxv2i32 = vecreduce_add nxv8i32, nxv2i32
  // The difference in types does introduce extra vsetvli instructions, but
  // correspondingly it reduces the number of registers consumed per reduction.
  // Also has a mask and VL operand.
  VECREDUCE_ADD_VL,
  VECREDUCE_UMAX_VL,
  VECREDUCE_SMAX_VL,
  VECREDUCE_UMIN_VL,
  VECREDUCE_SMIN_VL,
  VECREDUCE_AND_VL,
  VECREDUCE_OR_VL,
  VECREDUCE_XOR_VL,
  VECREDUCE_FADD_VL,
  VECREDUCE_SEQ_FADD_VL,
  VECREDUCE_FMIN_VL,
  VECREDUCE_FMAX_VL,

  // Vector binary and unary ops with a mask as a third operand, and VL as a
  // fourth operand.
  // FIXME: Can we replace these with ISD::VP_*?
  ADD_VL,
  AND_VL,
  MUL_VL,
  OR_VL,
  SDIV_VL,
  SHL_VL,
  SREM_VL,
  SRA_VL,
  SRL_VL,
  SUB_VL,
  UDIV_VL,
  UREM_VL,
  XOR_VL,

  SADDSAT_VL,
  UADDSAT_VL,
  SSUBSAT_VL,
  USUBSAT_VL,

  FADD_VL,
  FSUB_VL,
  FMUL_VL,
  FDIV_VL,
  FNEG_VL,
  FABS_VL,
  FSQRT_VL,
  FMA_VL,
  FCOPYSIGN_VL,
  SMIN_VL,
  SMAX_VL,
  UMIN_VL,
  UMAX_VL,
  FMINNUM_VL,
  FMAXNUM_VL,
  MULHS_VL,
  MULHU_VL,
  FP_TO_SINT_VL,
  FP_TO_UINT_VL,
  SINT_TO_FP_VL,
  UINT_TO_FP_VL,
  FP_ROUND_VL,
  FP_EXTEND_VL,

  // Widening instructions
  VWMUL_VL,
  VWMULU_VL,

  // Vector compare producing a mask. Fourth operand is input mask. Fifth
  // operand is VL.
  SETCC_VL,

  // Vector select with an additional VL operand. This operation is unmasked.
  VSELECT_VL,

  // Mask binary operators.
  VMAND_VL,
  VMOR_VL,
  VMXOR_VL,

  // Set mask vector to all zeros or ones.
  VMCLR_VL,
  VMSET_VL,
  // Matches the semantics of vrgather.vx, vrgather.vv, and vrgatherei16.vv
  // with an extra operand for VL.
  VRGATHER_VX_VL,
  VRGATHER_VV_VL,
  VRGATHEREI16_VV_VL,

  // Vector sign/zero extend with additional mask & VL operands.
  VSEXT_VL,
  VZEXT_VL,

  // vpopc.m with additional mask and VL operands.
  VPOPC_VL,

  // Reads value of CSR.
  // The first operand is a chain pointer. The second specifies the address of
  // the required CSR. Two results are produced, the read value and the new
  // chain pointer.
  READ_CSR,
  // Write value to CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR and the third is the value to write. The result is the
  // new chain pointer.
  WRITE_CSR,
  // Read and write value of CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR and the third is the value to write. Two results are
  // produced, the value read before the modification and the new chain
  // pointer.
  SWAP_CSR,

  // Memory opcodes start here.
  VLE_VL = ISD::FIRST_TARGET_MEMORY_OPCODE,
  VSE_VL,

  // WARNING: Do not add anything at the end unless you want the node to
  // have a memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
  // opcodes will be treated as target memory ops!
};
} // namespace RISCVISD

class RISCVTargetLowering : public TargetLowering {
  const RISCVSubtarget &Subtarget;

public:
  explicit RISCVTargetLowering(const TargetMachine &TM,
                               const RISCVSubtarget &STI);

  const RISCVSubtarget &getSubtarget() const { return Subtarget; }

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  bool softPromoteHalfType() const override { return true; }

  /// Return the register type for a given MVT, ensuring vectors are treated
  /// as a series of GPR-sized integers.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;

  /// Return the number of registers for a given MVT, ensuring vectors are
  /// treated as a series of GPR-sized integers.
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override;
  bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
                                      unsigned DefinedValues) const override;

  // Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  // This method returns the name of a target-specific DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return isa<LoadInst>(I) || isa<StoreInst>(I);
  }
  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::SIGN_EXTEND;
  }

  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::SIGN_EXTEND;
  }

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
    if (DAG.getMachineFunction().getFunction().hasMinSize())
      return false;
    return true;
  }
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool shouldExtendTypeInLibCall(EVT Type) const override;
  bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;

  /// Returns the register with the specified architectural or ABI name. This
  /// method is necessary to lower the llvm.read_register.* and
  /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
  /// with the clang -ffixed-xX flag for access to be allowed.
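  /// For example (illustrative IR), reading the stack pointer:
  ///   %sp = call i64 @llvm.read_register.i64(metadata !0) ; !0 = !{!"sp"}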
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  // Lower incoming arguments, copying physregs into vregs.
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override {
    return true;
  }
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                              SDValue C) const override;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
                                      Value *AlignedAddr, Value *Incr,
                                      Value *Mask, Value *ShiftAmt,
                                      AtomicOrdering Ord) const override;
  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
                                          AtomicCmpXchgInst *CI,
                                          Value *AlignedAddr, Value *CmpVal,
                                          Value *NewVal, Value *Mask,
                                          AtomicOrdering Ord) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;

  bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
                                   SDValue Val, SDValue *Parts,
                                   unsigned NumParts, MVT PartVT,
                                   Optional<CallingConv::ID> CC) const override;

  SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             Optional<CallingConv::ID> CC) const override;

  static RISCVII::VLMUL getLMUL(MVT VT);
  static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
  static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
  static unsigned getRegClassIDForVecVT(MVT VT);
  static std::pair<unsigned, unsigned>
  decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT,
                                           unsigned InsertExtractIdx,
                                           const RISCVRegisterInfo *TRI);
  MVT getContainerForFixedLengthVector(MVT VT) const;

  bool shouldRemoveExtendFromGSIndex(EVT VT) const override;

private:
  /// RISCVCCAssignFn - This target-specific function extends the default
  /// CCValAssign with additional information used to lower RISC-V calling
  /// conventions.
  typedef bool RISCVCCAssignFn(const DataLayout &DL, RISCVABI::ABI,
                               unsigned ValNo, MVT ValVT, MVT LocVT,
                               CCValAssign::LocInfo LocInfo,
                               ISD::ArgFlagsTy ArgFlags, CCState &State,
                               bool IsFixed, bool IsRet, Type *OrigTy,
                               const RISCVTargetLowering &TLI,
                               Optional<unsigned> FirstMaskArgument);

  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
                        RISCVCCAssignFn Fn) const;
  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         bool IsRet, CallLoweringInfo *CLI,
                         RISCVCCAssignFn Fn) const;

  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;

  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                           bool UseGOT) const;
  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;

  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
  SDValue lowerSPLAT_VECTOR_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskSplat(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                             int64_t ExtTrueVal) const;
  SDValue lowerVectorMaskTrunc(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFPVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTEP_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_REVERSE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue lowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMSCATTER(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLogicOpToRVV(SDValue Op, SelectionDAG &DAG,
                                             unsigned MaskOpc,
                                             unsigned VecOpc) const;
  SDValue lowerFixedLengthVectorShiftToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG, unsigned NewOpc,
                            bool HasMask = true) const;
  SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc) const;
  SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
                                            unsigned ExtendOpc) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

  SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
      const SmallVector<CCValAssign, 16> &ArgLocs) const;

  /// Generate error diagnostics if any register used by CC has been marked
  /// reserved.
  void validateCCReservedRegs(
      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
      MachineFunction &MF) const;

  bool useRVVForFixedLengthVectorVT(MVT VT) const;

  MVT getVPExplicitVectorLengthTy() const override;

  /// RVV code generation for fixed length vectors does not lower all
  /// BUILD_VECTORs. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR can be custom lowered for all legal vector types,
  /// this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;
};

namespace RISCV {
// We use 64 bits as the known part (the vscale = 1 size) of the scalable
// vector types.
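// For example, nxv1i64 is known to occupy at least RVVBitsPerBlock (64) bits,
// and vscale x 64 bits in general.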
static constexpr unsigned RVVBitsPerBlock = 64;
} // namespace RISCV

namespace RISCVVIntrinsicsTable {

struct RISCVVIntrinsicInfo {
  unsigned IntrinsicID;
  uint8_t SplatOperand;
};
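
// A typical lookup through the generated table (a sketch; assumes the
// tablegen'd getRISCVVIntrinsicInfo accessor declared via the include below):
//   if (const auto *II = getRISCVVIntrinsicInfo(IntNo))
//     if (II->SplatOperand > 0)
//       ...; // operand II->SplatOperand is a splatted scalar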

using namespace RISCV;

#define GET_RISCVVIntrinsicsTable_DECL
#include "RISCVGenSearchableTables.inc"

} // end namespace RISCVVIntrinsicsTable

} // end namespace llvm

#endif