//===-- RISCVISelLowering.h - RISCV DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H

#include "RISCV.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
#include <optional>

namespace llvm {
class RISCVSubtarget;
struct RISCVRegisterInfo;
namespace RISCVISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  RET_FLAG,
  URET_FLAG,
  SRET_FLAG,
  MRET_FLAG,
  CALL,
  /// Select with condition operator - This selects between a true value and
  /// a false value (ops #3 and #4) based on the boolean result of comparing
  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
  /// condition code in op #2, an XLenVT constant from the ISD::CondCode enum.
  /// The lhs and rhs are XLenVT integers. The true and false values can be
  /// integer or floating point.
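  ///
  /// An illustrative sketch of the node, using the operand order described
  /// above:
  ///   result = SELECT_CC lhs, rhs, (condcode setlt), tval, fval
  /// selects tval when (lhs setlt rhs) holds and fval otherwise.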
  SELECT_CC,
  BR_CC,
  BuildPairF64,
  SplitF64,
  TAIL,

  // Add the Lo 12 bits from an address. Selected to ADDI.
  ADD_LO,
  // Get the Hi 20 bits from an address. Selected to LUI.
  HI,
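  // Together, HI and ADD_LO lower a symbol address in the medlow code model
  // roughly as (a sketch of the selected sequence):
  //   lui  a0, %hi(sym)       ; HI
  //   addi a0, a0, %lo(sym)   ; ADD_LO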

  // Represents an AUIPC+ADDI pair. Selected to PseudoLLA.
  LLA,

  // Selected as PseudoAddTPRel. Used to emit a TP-relative relocation.
  ADD_TPREL,

  // Load the address of a thread-local symbol using the general-dynamic TLS
  // model. Selected to PseudoLA_TLS_GD.
  LA_TLS_GD,

  // Multiply high for signed x unsigned operands.
  MULHSU,
  // RV64I shifts, directly matching the semantics of the named RISC-V
  // instructions.
  SLLW,
  SRAW,
  SRLW,
  // 32-bit operations from RV64M that can't be simply matched with a pattern
  // at instruction selection time. These have undefined behavior for division
  // by 0 or overflow (divw) like their target independent counterparts.
  DIVW,
  DIVUW,
  REMUW,
  // RV64IB rotates, directly matching the semantics of the named RISC-V
  // instructions.
  ROLW,
  RORW,
  // RV64IZbb bit counting instructions directly matching the semantics of the
  // named RISC-V instructions.
  CLZW,
  CTZW,

  // RV64IZbb absolute value for i32. Expanded to (max (negw X), X) during isel.
  ABSW,

  // FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
  // XLEN is the only legal integer width.
  //
  // FMV_H_X matches the semantics of the FMV.H.X instruction.
  // FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result.
  // FMV_X_SIGNEXTH is similar to FMV.X.H but has a sign-extended result.
  // FMV_W_X_RV64 matches the semantics of the FMV.W.X instruction.
  // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
  //
  // These are more convenient semantics for producing DAG combines that remove
  // unnecessary GPR->FPR->GPR moves.
  FMV_H_X,
  FMV_X_ANYEXTH,
  FMV_X_SIGNEXTH,
  FMV_W_X_RV64,
  FMV_X_ANYEXTW_RV64,
  // FP to XLen int conversions. Corresponds to fcvt.l(u).s/d/h on RV64 and
  // fcvt.w(u).s/d/h on RV32. Unlike FP_TO_S/UINT these saturate out of
  // range inputs. These are used for FP_TO_S/UINT_SAT lowering. Rounding mode
  // is passed as a TargetConstant operand using the RISCVFPRndMode enum.
  FCVT_X,
  FCVT_XU,
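  // An illustrative node form (RV64, round-towards-zero):
  //   i64 = FCVT_X f64:$x, (targetconstant RISCVFPRndMode::RTZ)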
  // FP to 32 bit int conversions for RV64. These are used to keep track of the
  // result being sign extended to 64 bit. These saturate out of range inputs.
  // Used for FP_TO_S/UINT and FP_TO_S/UINT_SAT lowering. Rounding mode
  // is passed as a TargetConstant operand using the RISCVFPRndMode enum.
  FCVT_W_RV64,
  FCVT_WU_RV64,

  // Rounds an FP value to its corresponding integer in the same FP format.
  // First operand is the value to round, the second operand is the largest
  // integer that can be represented exactly in the FP format. This will be
  // expanded into multiple instructions and basic blocks with a custom
  // inserter.
  FROUND,

  // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
  // (returns (Lo, Hi)). It takes a chain operand.
  READ_CYCLE_WIDE,
  // brev8, orc.b, zip, and unzip from Zbb and Zbkb. All operands are i32 or
  // XLenVT.
  BREV8,
  ORC_B,
  ZIP,
  UNZIP,
  // Vector Extension
  // VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand
  // for the VL value to be used for the operation. The first operand is the
  // passthru operand.
  VMV_V_X_VL,
  // VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra
  // operand for the VL value to be used for the operation. The first operand
  // is the passthru operand.
  VFMV_V_F_VL,
  // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign
  // extended from the vector element size.
  VMV_X_S,
  // VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand.
  VMV_S_X_VL,
  // VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand.
  VFMV_S_F_VL,
  // Splats a 64-bit value that has been split into two i32 parts. This is
  // expanded late to two scalar stores and a stride 0 vector load.
  // The first operand is the passthru operand.
  SPLAT_VECTOR_SPLIT_I64_VL,
  // Read the VLENB CSR.
  READ_VLENB,
  // Truncates an RVV integer vector by one power-of-two. Carries both an extra
  // mask and VL operand.
  TRUNCATE_VECTOR_VL,
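  // e.g. (illustrative types, one power-of-two down):
  //   nxv2i32 = TRUNCATE_VECTOR_VL nxv2i64:$src, mask, vl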
  // Matches the semantics of vslideup/vslidedown. The first operand is the
  // pass-thru operand, the second is the source vector, the third is the
  // XLenVT index (either constant or non-constant), the fourth is the mask
  // and the fifth the VL.
  VSLIDEUP_VL,
  VSLIDEDOWN_VL,
  // Matches the semantics of vslide1up/vslide1down. The first operand is the
  // passthru operand, the second is the source vector, and the third is the
  // XLenVT scalar value. The fourth and fifth operands are the mask and VL
  // operands.
  VSLIDE1UP_VL,
  VSLIDE1DOWN_VL,
  // Matches the semantics of the vid.v instruction, with a mask and VL
  // operand.
  VID_VL,
  // Matches the semantics of the vfncvt.rod.f.f.w instruction (convert
  // double-width float to single-width float, rounding towards odd). Takes a
  // double-width float vector and produces a single-width float vector. Also
  // has a mask and VL operand.
  VFNCVT_ROD_VL,
  // These nodes match the semantics of the corresponding RVV vector reduction
  // instructions. They produce a vector result which is the reduction
  // performed over the second vector operand plus the first element of the
  // third vector operand. The first operand is the pass-thru operand. The
  // second operand is an unconstrained vector type, and the result, first, and
  // third operands' types are expected to be the corresponding full-width
  // LMUL=1 type for the second operand:
  //   nxv8i8 = vecreduce_add nxv8i8, nxv32i8, nxv8i8
  //   nxv2i32 = vecreduce_add nxv2i32, nxv8i32, nxv2i32
  // The difference in types does introduce extra vsetvli instructions, but it
  // also reduces the number of registers consumed per reduction.
  // Also has a mask and VL operand.
  VECREDUCE_ADD_VL,
  VECREDUCE_UMAX_VL,
  VECREDUCE_SMAX_VL,
  VECREDUCE_UMIN_VL,
  VECREDUCE_SMIN_VL,
  VECREDUCE_AND_VL,
  VECREDUCE_OR_VL,
  VECREDUCE_XOR_VL,
  VECREDUCE_FADD_VL,
  VECREDUCE_SEQ_FADD_VL,
  VECREDUCE_FMIN_VL,
  VECREDUCE_FMAX_VL,

  // Vector binary ops with a merge as a third operand, a mask as a fourth
  // operand, and VL as a fifth operand.
  ADD_VL,
  AND_VL,
  MUL_VL,
  OR_VL,
  SDIV_VL,
  SHL_VL,
  SREM_VL,
  SRA_VL,
  SRL_VL,
  SUB_VL,
  UDIV_VL,
  UREM_VL,
  XOR_VL,
  SMIN_VL,
  SMAX_VL,
  UMIN_VL,
  UMAX_VL,

  SADDSAT_VL,
  UADDSAT_VL,
  SSUBSAT_VL,
  USUBSAT_VL,

  MULHS_VL,
  MULHU_VL,
  FADD_VL,
  FSUB_VL,
  FMUL_VL,
  FDIV_VL,
  FMINNUM_VL,
  FMAXNUM_VL,

  // Vector unary ops with a mask as a second operand and VL as a third operand.
  FNEG_VL,
  FABS_VL,
  FSQRT_VL,
  FCOPYSIGN_VL, // Has a merge operand
  VFCVT_RTZ_X_F_VL,
  VFCVT_RTZ_XU_F_VL,
  VFCVT_X_F_VL,
  VFCVT_XU_F_VL,
  VFROUND_NOEXCEPT_VL,
  VFCVT_RM_X_F_VL,  // Has a rounding mode operand.
  VFCVT_RM_XU_F_VL, // Has a rounding mode operand.
  SINT_TO_FP_VL,
  UINT_TO_FP_VL,
  VFCVT_RM_F_X_VL,  // Has a rounding mode operand.
  VFCVT_RM_F_XU_VL, // Has a rounding mode operand.
  FP_ROUND_VL,
  FP_EXTEND_VL,

  // Vector FMA ops with a mask as a fourth operand and VL as a fifth operand.
  VFMADD_VL,
  VFNMADD_VL,
  VFMSUB_VL,
  VFNMSUB_VL,
  // Widening instructions with a merge value as a third operand, a mask as a
  // fourth operand, and VL as a fifth operand.
  VWMUL_VL,
  VWMULU_VL,
  VWMULSU_VL,
  VWADD_VL,
  VWADDU_VL,
  VWSUB_VL,
  VWSUBU_VL,
  VWADD_W_VL,
  VWADDU_W_VL,
  VWSUB_W_VL,
  VWSUBU_W_VL,

  VNSRL_VL,

  // Vector compare producing a mask. Fourth operand is input mask. Fifth
  // operand is VL.
  SETCC_VL,

  // Vector select with an additional VL operand. This operation is unmasked.
  VSELECT_VL,
  // Vector select with operand #2 (the value when the condition is false) tied
  // to the destination and an additional VL operand. This operation is
  // unmasked.
  VP_MERGE_VL,

  // Mask binary operators.
  VMAND_VL,
  VMOR_VL,
  VMXOR_VL,

  // Set mask vector to all zeros or ones.
  VMCLR_VL,
  VMSET_VL,

  // Matches the semantics of vrgather.vx and vrgather.vv with extra operands
  // for passthru and VL. Operands are (src, index, mask, passthru, vl).
  VRGATHER_VX_VL,
  VRGATHER_VV_VL,
  VRGATHEREI16_VV_VL,

  // Vector sign/zero extend with additional mask & VL operands.
  VSEXT_VL,
  VZEXT_VL,

  // vcpop.m with additional mask and VL operands.
  VCPOP_VL,

  // vfirst.m with additional mask and VL operands.
  VFIRST_VL,

  // Reads the value of a CSR.
  // The first operand is a chain pointer. The second specifies the address of
  // the required CSR. Two results are produced: the read value and the new
  // chain pointer.
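  //
  // An illustrative node form, reading the floating-point rounding mode (frm)
  // CSR:
  //   (xlen, ch) = READ_CSR ch, (targetconstant frm-sysreg-number)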
  READ_CSR,
  // Writes a value to a CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR, and the third is the value to write. The result is the
  // new chain pointer.
  WRITE_CSR,
  // Reads and writes the value of a CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR, and the third is the value to write. Two results are
  // produced: the value read before the modification and the new chain
  // pointer.
  SWAP_CSR,

  // FP to 32 bit int conversions for RV64. These are used to keep track of the
  // result being sign extended to 64 bit. These saturate out of range inputs.
  STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCVT_WU_RV64,

  // WARNING: Do not add anything at the end unless you want the node to have
  // a memop! Starting from FIRST_TARGET_MEMORY_OPCODE, all opcodes will be
  // treated as target memory ops!

  // Load address. Selected to PseudoLA / PseudoLA_TLS_IE, which expand to a
  // load from the GOT (hence these are memory opcodes).
  LA = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LA_TLS_IE,
};
} // namespace RISCVISD

class RISCVTargetLowering : public TargetLowering {
  const RISCVSubtarget &Subtarget;

public:
  explicit RISCVTargetLowering(const TargetMachine &TM,
                               const RISCVSubtarget &STI);

  const RISCVSubtarget &getSubtarget() const { return Subtarget; }

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
  bool signExtendConstant(const ConstantInt *CI) const override;
  bool isCheapToSpeculateCttz(Type *Ty) const override;
  bool isCheapToSpeculateCtlz(Type *Ty) const override;
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
  bool hasAndNotCompare(SDValue Y) const override;
  bool hasBitTest(SDValue X, SDValue Y) const override;
  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;
  /// Return true if the (vector) instruction I will be lowered to an
  /// instruction with a scalar splat operand for the given Operand number.
  bool canSplatOperand(Instruction *I, int Operand) const;
  /// Return true if a vector instruction will lower to a target instruction
  /// able to splat the given operand.
  bool canSplatOperand(unsigned Opcode, int Operand) const;
  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;
  bool shouldScalarizeBinop(SDValue VecOp) const override;
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool preferScalarizeSplat(unsigned Opc) const override;

  bool softPromoteHalfType() const override { return true; }

  /// Return the register type for a given MVT, ensuring vectors are treated
  /// as a series of GPR-sized integers.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;

  /// Return the number of registers for a given MVT, ensuring vectors are
  /// treated as a series of GPR-sized integers.
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;

  bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
                                            EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override;
  bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
                                      unsigned DefinedValues) const override;

  // Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;

  // This method returns the name of a target specific DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return isa<LoadInst>(I) || isa<StoreInst>(I);
  }
  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::SIGN_EXTEND;
  }

  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::SIGN_EXTEND;
  }

  TargetLowering::ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const override {
    if (DAG.getMachineFunction().getFunction().hasMinSize())
      return ShiftLegalizationStrategy::LowerToLibcall;
    return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
                                                              ExpansionFactor);
  }

  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool shouldExtendTypeInLibCall(EVT Type) const override;
  bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;

  /// Returns the register with the specified architectural or ABI name. This
  /// method is necessary to lower the llvm.read_register.* and
  /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
  /// with the clang -ffixed-xX flag for access to be allowed.
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  // Lower incoming arguments, copy physregs into vregs.
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;
  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                              SDValue C) const override;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
                                      Value *AlignedAddr, Value *Incr,
                                      Value *Mask, Value *ShiftAmt,
                                      AtomicOrdering Ord) const override;
  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
                                          AtomicCmpXchgInst *CI,
                                          Value *AlignedAddr, Value *CmpVal,
                                          Value *NewVal, Value *Mask,
                                          AtomicOrdering Ord) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const override;

  bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
      const override;

  SDValue joinRegisterPartsIntoValue(
      SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
      unsigned NumParts, MVT PartVT, EVT ValueVT,
      std::optional<CallingConv::ID> CC) const override;

  static RISCVII::VLMUL getLMUL(MVT VT);
  inline static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize,
                                      unsigned MinSize) {
    // Original equation:
    //   VLMAX = (VectorBits / EltSize) * LMUL
    //   where LMUL = MinSize / RISCV::RVVBitsPerBlock
    // The following equations have been reordered to prevent loss of precision
    // when calculating fractional LMUL.
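    //
    // For example (an illustrative calculation), with VectorBits (VLEN) = 128
    // and nxv8i32 (EltSize = 32, MinSize = 256), taking RVVBitsPerBlock as 64:
    //   VLMAX = ((128 / 32) * 256) / 64 = 16 elements.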
    return ((VectorBits / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
  }
  static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
  static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
  static unsigned getRegClassIDForVecVT(MVT VT);
  static std::pair<unsigned, unsigned>
  decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT,
                                           unsigned InsertExtractIdx,
                                           const RISCVRegisterInfo *TRI);
  MVT getContainerForFixedLengthVector(MVT VT) const;

  bool shouldRemoveExtendFromGSIndex(EVT IndexVT, EVT DataVT) const override;

  bool isLegalElementTypeForRVV(Type *ScalarTy) const;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  unsigned getJumpTableEncoding() const override;

  const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                          const MachineBasicBlock *MBB,
                                          unsigned uid,
                                          MCContext &Ctx) const override;

  bool isVScaleKnownToBeAPowerOfTwo() const override;

  bool isLegalScaleForGatherScatter(uint64_t Scale,
                                    uint64_t ElemSize) const override {
    // Scaled addressing is not supported on indexed loads/stores.
    return Scale == 1;
  }

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilderBase &IRB) const override;

private:
  /// RISCVCCAssignFn - This target-specific function extends the default
  /// CCValAssign with additional information used to lower RISC-V calling
  /// conventions.
  typedef bool RISCVCCAssignFn(const DataLayout &DL, RISCVABI::ABI,
                               unsigned ValNo, MVT ValVT, MVT LocVT,
                               CCValAssign::LocInfo LocInfo,
                               ISD::ArgFlagsTy ArgFlags, CCState &State,
                               bool IsFixed, bool IsRet, Type *OrigTy,
                               const RISCVTargetLowering &TLI,
                               std::optional<unsigned> FirstMaskArgument);

  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
                        RISCVCCAssignFn Fn) const;
  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         bool IsRet, CallLoweringInfo *CLI,
                         RISCVCCAssignFn Fn) const;

  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;
  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                           bool UseGOT) const;
  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;

  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
  SDValue lowerSPLAT_VECTOR_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskSplat(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                             int64_t ExtTrueVal) const;
  SDValue lowerVectorMaskTruncLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorTruncLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorFPExtendOrRoundLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskVecReduction(SDValue Op, SelectionDAG &DAG,
                                      bool IsVP) const;
  SDValue lowerFPVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTEP_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_REVERSE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue lowerMaskedGather(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedScatter(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLogicOpToRVV(SDValue Op, SelectionDAG &DAG,
                                             unsigned MaskOpc,
                                             unsigned VecOpc) const;
  SDValue lowerFixedLengthVectorShiftToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG, unsigned NewOpc,
                            bool HasMergeOp = false, bool HasMask = true) const;
  SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc,
                    bool HasMergeOp = false) const;
  SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG, unsigned MaskOpc,
                         unsigned VecOpc) const;
  SDValue lowerVPExtMaskOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPSetCCMaskOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
                             unsigned RISCVISDOpc) const;
  SDValue lowerVPStridedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPStridedStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
                                            unsigned ExtendOpc) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) const;

  SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
      const SmallVector<CCValAssign, 16> &ArgLocs) const;

  /// Generate error diagnostics if any register used by CC has been marked
  /// reserved.
  void validateCCReservedRegs(
      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
      MachineFunction &MF) const;

  bool useRVVForFixedLengthVectorVT(MVT VT) const;

  MVT getVPExplicitVectorLengthTy() const override;

  /// RVV code generation for fixed length vectors does not lower all
  /// BUILD_VECTORs. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR can be custom lowered for all legal vector types,
  /// this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  /// Disable normalizing
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)).
  /// RISC-V doesn't have condition flags, so it's better to perform the and/or
  /// in a GPR.
  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override {
    return false;
  }

  /// For available scheduling models FDIV + two independent FMULs are much
  /// faster than two FDIVs.
  unsigned combineRepeatedFPDivisors() const override;
};
namespace RISCVVIntrinsicsTable {

struct RISCVVIntrinsicInfo {
  unsigned IntrinsicID;
  uint8_t ScalarOperand;
  uint8_t VLOperand;
  bool hasScalarOperand() const {
    // 0xF is not valid. See NoScalarOperand in IntrinsicsRISCV.td.
    return ScalarOperand != 0xF;
  }
  bool hasVLOperand() const {
    // 0x1F is not valid. See NoVLOperand in IntrinsicsRISCV.td.
    return VLOperand != 0x1F;
  }
};
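
// A sketch of a typical lookup against the generated table (the
// getRISCVVIntrinsicInfo declaration comes from the .inc include below):
//   if (const RISCVVIntrinsicInfo *II =
//           RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID))
//     if (II->hasVLOperand())
//       ; // the index of the VL operand is II->VLOperand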

using namespace RISCV;

#define GET_RISCVVIntrinsicsTable_DECL
#include "RISCVGenSearchableTables.inc"

} // end namespace RISCVVIntrinsicsTable

} // end namespace llvm

#endif