1 //===-- RISCVISelLowering.h - RISC-V DAG Lowering Interface -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISC-V uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
15 #define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
16 
17 #include "RISCV.h"
18 #include "llvm/CodeGen/CallingConvLower.h"
19 #include "llvm/CodeGen/SelectionDAG.h"
20 #include "llvm/CodeGen/TargetLowering.h"
21 #include "llvm/TargetParser/RISCVTargetParser.h"
22 #include <optional>
23 
24 namespace llvm {
25 class InstructionCost;
26 class RISCVSubtarget;
27 struct RISCVRegisterInfo;
28 
namespace RISCVISD {
// clang-format off
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  // Glued return nodes for plain (ret), supervisor (sret) and machine (mret)
  // returns.
  RET_GLUE,
  SRET_GLUE,
  MRET_GLUE,
  CALL,
  /// Select with condition operator - This selects between a true value and
  /// a false value (ops #3 and #4) based on the boolean result of comparing
  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
  /// condition code in op #2, a XLenVT constant from the ISD::CondCode enum.
  /// The lhs and rhs are XLenVT integers. The true and false values can be
  /// integer or floating point.
  SELECT_CC,
  BR_CC,
  // Build an f64 from a pair of i32 halves, and split an f64 back into two
  // i32 halves (used when f64 values live in GPR pairs).
  BuildPairF64,
  SplitF64,
  TAIL,

  // Add the Lo 12 bits from an address. Selected to ADDI.
  ADD_LO,
  // Get the Hi 20 bits from an address. Selected to LUI.
  HI,

  // Represents an AUIPC+ADDI pair. Selected to PseudoLLA.
  LLA,

  // Selected as PseudoAddTPRel. Used to emit a TP-relative relocation.
  ADD_TPREL,

  // Multiply high for signed*unsigned operands. Selected to MULHSU.
  MULHSU,
  // RV64I shifts, directly matching the semantics of the named RISC-V
  // instructions.
  SLLW,
  SRAW,
  SRLW,
  // 32-bit operations from RV64M that can't be simply matched with a pattern
  // at instruction selection time. These have undefined behavior for division
  // by 0 or overflow (divw) like their target independent counterparts.
  DIVW,
  DIVUW,
  REMUW,
  // RV64IB rotates, directly matching the semantics of the named RISC-V
  // instructions.
  ROLW,
  RORW,
  // RV64IZbb bit counting instructions directly matching the semantics of the
  // named RISC-V instructions.
  CLZW,
  CTZW,

  // RV64IZbb absolute value for i32. Expanded to (max (negw X), X) during isel.
  ABSW,

  // FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
  // XLEN is the only legal integer width.
  //
  // FMV_H_X matches the semantics of the FMV.H.X.
  // FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result.
  // FMV_X_SIGNEXTH is similar to FMV.X.H and has a sign-extended result.
  // FMV_W_X_RV64 matches the semantics of the FMV.W.X.
  // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
  //
  // This is a more convenient semantic for producing dagcombines that remove
  // unnecessary GPR->FPR->GPR moves.
  FMV_H_X,
  FMV_X_ANYEXTH,
  FMV_X_SIGNEXTH,
  FMV_W_X_RV64,
  FMV_X_ANYEXTW_RV64,
  // FP to XLen int conversions. Corresponds to fcvt.l(u).s/d/h on RV64 and
  // fcvt.w(u).s/d/h on RV32. Unlike FP_TO_S/UINT these saturate out of
  // range inputs. These are used for FP_TO_S/UINT_SAT lowering. Rounding mode
  // is passed as a TargetConstant operand using the RISCVFPRndMode enum.
  FCVT_X,
  FCVT_XU,
  // FP to 32 bit int conversions for RV64. These are used to keep track of the
  // result being sign extended to 64 bit. These saturate out of range inputs.
  // Used for FP_TO_S/UINT and FP_TO_S/UINT_SAT lowering. Rounding mode
  // is passed as a TargetConstant operand using the RISCVFPRndMode enum.
  FCVT_W_RV64,
  FCVT_WU_RV64,

  // Rounds an FP value to bf16, and extends a bf16 value to FP, respectively.
  FP_ROUND_BF16,
  FP_EXTEND_BF16,

  // Rounds an FP value to its corresponding integer in the same FP format.
  // First operand is the value to round, the second operand is the largest
  // integer that can be represented exactly in the FP format. This will be
  // expanded into multiple instructions and basic blocks with a custom
  // inserter.
  FROUND,

  // Matches the semantics of the fclass instruction.
  FCLASS,

  // Floating point fmax and fmin matching the RISC-V instruction semantics.
  FMAX, FMIN,

  // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
  // (returns (Lo, Hi)). It takes a chain operand.
  READ_CYCLE_WIDE,
  // brev8, orc.b, zip, and unzip from Zbb and Zbkb. All operands are i32 or
  // XLenVT.
  BREV8,
  ORC_B,
  ZIP,
  UNZIP,

  // Scalar cryptography
  CLMUL, CLMULH, CLMULR,
  SHA256SIG0, SHA256SIG1, SHA256SUM0, SHA256SUM1,
  SM4KS, SM4ED,
  SM3P0, SM3P1,

  // Vector Extension
  FIRST_VL_VECTOR_OP,
  // VMV_V_V_VL matches the semantics of vmv.v.v but includes an extra operand
  // for the VL value to be used for the operation. The first operand is
  // passthru operand.
  VMV_V_V_VL = FIRST_VL_VECTOR_OP,
  // VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand
  // for the VL value to be used for the operation. The first operand is
  // passthru operand.
  VMV_V_X_VL,
  // VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra operand
  // for the VL value to be used for the operation. The first operand is
  // passthru operand.
  VFMV_V_F_VL,
  // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign
  // extended from the vector element size.
  VMV_X_S,
  // VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand.
  VMV_S_X_VL,
  // VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand.
  VFMV_S_F_VL,
  // Splats a 64-bit value that has been split into two i32 parts. This is
  // expanded late to two scalar stores and a stride 0 vector load.
  // The first operand is passthru operand.
  SPLAT_VECTOR_SPLIT_I64_VL,
  // Truncates a RVV integer vector by one power-of-two. Carries both an extra
  // mask and VL operand.
  TRUNCATE_VECTOR_VL,
  // Matches the semantics of vslideup/vslidedown. The first operand is the
  // pass-thru operand, the second is the source vector, the third is the XLenVT
  // index (either constant or non-constant), the fourth is the mask, the fifth
  // is the VL and the sixth is the policy.
  VSLIDEUP_VL,
  VSLIDEDOWN_VL,
  // Matches the semantics of vslide1up/vslide1down. The first operand is
  // passthru operand, the second is source vector, third is the XLenVT scalar
  // value. The fourth and fifth operands are the mask and VL operands.
  VSLIDE1UP_VL,
  VSLIDE1DOWN_VL,
  // Matches the semantics of vfslide1up/vfslide1down. The first operand is
  // passthru operand, the second is source vector, third is a scalar value
  // whose type matches the element type of the vectors.  The fourth and fifth
  // operands are the mask and VL operands.
  VFSLIDE1UP_VL,
  VFSLIDE1DOWN_VL,
  // Matches the semantics of the vid.v instruction, with a mask and VL
  // operand.
  VID_VL,
  // Matches the semantics of the vfncvt.rod instruction (Convert double-width
  // float to single-width float, rounding towards odd). Takes a double-width
  // float vector and produces a single-width float vector. Also has a mask and
  // VL operand.
  VFNCVT_ROD_VL,
  // These nodes match the semantics of the corresponding RVV vector reduction
  // instructions. They produce a vector result which is the reduction
  // performed over the second vector operand plus the first element of the
  // third vector operand. The first operand is the pass-thru operand. The
  // second operand is an unconstrained vector type, and the result, first, and
  // third operand's types are expected to be the corresponding full-width
  // LMUL=1 type for the second operand:
  //   nxv8i8 = vecreduce_add nxv8i8, nxv32i8, nxv8i8
  //   nxv2i32 = vecreduce_add nxv2i32, nxv8i32, nxv2i32
  // The difference in types does introduce extra vsetvli instructions but
  // similarly it reduces the number of registers consumed per reduction.
  // Also has a mask and VL operand.
  VECREDUCE_ADD_VL,
  VECREDUCE_UMAX_VL,
  VECREDUCE_SMAX_VL,
  VECREDUCE_UMIN_VL,
  VECREDUCE_SMIN_VL,
  VECREDUCE_AND_VL,
  VECREDUCE_OR_VL,
  VECREDUCE_XOR_VL,
  VECREDUCE_FADD_VL,
  VECREDUCE_SEQ_FADD_VL,
  VECREDUCE_FMIN_VL,
  VECREDUCE_FMAX_VL,

  // Vector binary ops with a merge as a third operand, a mask as a fourth
  // operand, and VL as a fifth operand.
  ADD_VL,
  AND_VL,
  MUL_VL,
  OR_VL,
  SDIV_VL,
  SHL_VL,
  SREM_VL,
  SRA_VL,
  SRL_VL,
  ROTL_VL,
  ROTR_VL,
  SUB_VL,
  UDIV_VL,
  UREM_VL,
  XOR_VL,
  SMIN_VL,
  SMAX_VL,
  UMIN_VL,
  UMAX_VL,

  BITREVERSE_VL,
  BSWAP_VL,
  CTLZ_VL,
  CTTZ_VL,
  CTPOP_VL,

  SADDSAT_VL,
  UADDSAT_VL,
  SSUBSAT_VL,
  USUBSAT_VL,

  MULHS_VL,
  MULHU_VL,
  FADD_VL,
  FSUB_VL,
  FMUL_VL,
  FDIV_VL,
  VFMIN_VL,
  VFMAX_VL,

  // Vector unary ops with a mask as a second operand and VL as a third operand.
  FNEG_VL,
  FABS_VL,
  FSQRT_VL,
  FCLASS_VL,
  FCOPYSIGN_VL, // Has a merge operand
  VFCVT_RTZ_X_F_VL,
  VFCVT_RTZ_XU_F_VL,
  VFCVT_X_F_VL,
  VFCVT_XU_F_VL,
  VFROUND_NOEXCEPT_VL,
  VFCVT_RM_X_F_VL,  // Has a rounding mode operand.
  VFCVT_RM_XU_F_VL, // Has a rounding mode operand.
  SINT_TO_FP_VL,
  UINT_TO_FP_VL,
  VFCVT_RM_F_X_VL,  // Has a rounding mode operand.
  VFCVT_RM_F_XU_VL, // Has a rounding mode operand.
  FP_ROUND_VL,
  FP_EXTEND_VL,

  // Vector FMA ops with a mask as a fourth operand and VL as a fifth operand.
  VFMADD_VL,
  VFNMADD_VL,
  VFMSUB_VL,
  VFNMSUB_VL,

  // Vector widening FMA ops with a mask as a fourth operand and VL as a fifth
  // operand.
  VFWMADD_VL,
  VFWNMADD_VL,
  VFWMSUB_VL,
  VFWNMSUB_VL,

  // Widening instructions with a merge value as a third operand, a mask as a
  // fourth operand, and VL as a fifth operand.
  VWMUL_VL,
  VWMULU_VL,
  VWMULSU_VL,
  VWADD_VL,
  VWADDU_VL,
  VWSUB_VL,
  VWSUBU_VL,
  VWADD_W_VL,
  VWADDU_W_VL,
  VWSUB_W_VL,
  VWSUBU_W_VL,
  VWSLL_VL,

  VFWMUL_VL,
  VFWADD_VL,
  VFWSUB_VL,
  VFWADD_W_VL,
  VFWSUB_W_VL,

  // Widening ternary operations with a mask as the fourth operand and VL as the
  // fifth operand.
  VWMACC_VL,
  VWMACCU_VL,
  VWMACCSU_VL,

  // Narrowing logical shift right.
  // Operands are (source, shift, passthru, mask, vl)
  VNSRL_VL,

  // Vector compare producing a mask. Fourth operand is input mask. Fifth
  // operand is VL.
  SETCC_VL,

  // Vector select with an additional VL operand. This operation is unmasked.
  VSELECT_VL,
  // General vmerge node with mask, true, false, passthru, and vl operands.
  VMERGE_VL,

  // Mask binary operators.
  VMAND_VL,
  VMOR_VL,
  VMXOR_VL,

  // Set mask vector to all zeros or ones.
  VMCLR_VL,
  VMSET_VL,

  // Matches the semantics of vrgather.vx and vrgather.vv with extra operands
  // for passthru and VL. Operands are (src, index, mask, passthru, vl).
  VRGATHER_VX_VL,
  VRGATHER_VV_VL,
  VRGATHEREI16_VV_VL,

  // Vector sign/zero extend with additional mask & VL operands.
  VSEXT_VL,
  VZEXT_VL,

  //  vcpop.m with additional mask and VL operands.
  VCPOP_VL,

  //  vfirst.m with additional mask and VL operands.
  VFIRST_VL,

  LAST_VL_VECTOR_OP = VFIRST_VL,

  // Read VLENB CSR
  READ_VLENB,
  // Reads value of CSR.
  // The first operand is a chain pointer. The second specifies address of the
  // required CSR. Two results are produced, the read value and the new chain
  // pointer.
  READ_CSR,
  // Write value to CSR.
  // The first operand is a chain pointer, the second specifies address of the
  // required CSR and the third is the value to write. The result is the new
  // chain pointer.
  WRITE_CSR,
  // Read and write value of CSR.
  // The first operand is a chain pointer, the second specifies address of the
  // required CSR and the third is the value to write. Two results are produced,
  // the value read before the modification and the new chain pointer.
  SWAP_CSR,

  // Branchless select operations, matching the semantics of the instructions
  // defined in Zicond or XVentanaCondOps.
  CZERO_EQZ, // vt.maskc for XVentanaCondOps.
  CZERO_NEZ, // vt.maskcn for XVentanaCondOps.

  // FP to 32 bit int conversions for RV64. These are used to keep track of the
  // result being sign extended to 64 bit. These saturate out of range inputs.
  STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCVT_WU_RV64,
  STRICT_FADD_VL,
  STRICT_FSUB_VL,
  STRICT_FMUL_VL,
  STRICT_FDIV_VL,
  STRICT_FSQRT_VL,
  STRICT_VFMADD_VL,
  STRICT_VFNMADD_VL,
  STRICT_VFMSUB_VL,
  STRICT_VFNMSUB_VL,
  STRICT_FP_ROUND_VL,
  STRICT_FP_EXTEND_VL,
  STRICT_VFNCVT_ROD_VL,
  STRICT_SINT_TO_FP_VL,
  STRICT_UINT_TO_FP_VL,
  STRICT_VFCVT_RM_X_F_VL,
  STRICT_VFCVT_RTZ_X_F_VL,
  STRICT_VFCVT_RTZ_XU_F_VL,
  STRICT_FSETCC_VL,
  STRICT_FSETCCS_VL,
  STRICT_VFROUND_NOEXCEPT_VL,
  LAST_RISCV_STRICTFP_OPCODE = STRICT_VFROUND_NOEXCEPT_VL,

  // WARNING: Do not add anything at the end unless you want the node to
  // have a memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
  // opcodes will be treated as target memory ops!

  // Load/store pair pseudos from the XTHeadMemPair vendor extension.
  TH_LWD = ISD::FIRST_TARGET_MEMORY_OPCODE,
  TH_LWUD,
  TH_LDD,
  TH_SWD,
  TH_SDD,
};
// clang-format on
} // namespace RISCVISD
426 
427 class RISCVTargetLowering : public TargetLowering {
428   const RISCVSubtarget &Subtarget;
429 
430 public:
431   explicit RISCVTargetLowering(const TargetMachine &TM,
432                                const RISCVSubtarget &STI);
433 
  /// Return the subtarget this lowering object was created for.
  const RISCVSubtarget &getSubtarget() const { return Subtarget; }
435 
436   bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
437                           MachineFunction &MF,
438                           unsigned Intrinsic) const override;
439   bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
440                              unsigned AS,
441                              Instruction *I = nullptr) const override;
442   bool isLegalICmpImmediate(int64_t Imm) const override;
443   bool isLegalAddImmediate(int64_t Imm) const override;
444   bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
445   bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
446   bool isZExtFree(SDValue Val, EVT VT2) const override;
447   bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
448   bool signExtendConstant(const ConstantInt *CI) const override;
449   bool isCheapToSpeculateCttz(Type *Ty) const override;
450   bool isCheapToSpeculateCtlz(Type *Ty) const override;
451   bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
452   bool hasAndNotCompare(SDValue Y) const override;
453   bool hasBitTest(SDValue X, SDValue Y) const override;
454   bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
455       SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
456       unsigned OldShiftOpcode, unsigned NewShiftOpcode,
457       SelectionDAG &DAG) const override;
458   /// Return true if the (vector) instruction I will be lowered to an instruction
459   /// with a scalar splat operand for the given Operand number.
460   bool canSplatOperand(Instruction *I, int Operand) const;
461   /// Return true if a vector instruction will lower to a target instruction
462   /// able to splat the given operand.
463   bool canSplatOperand(unsigned Opcode, int Operand) const;
464   bool shouldSinkOperands(Instruction *I,
465                           SmallVectorImpl<Use *> &Ops) const override;
466   bool shouldScalarizeBinop(SDValue VecOp) const override;
467   bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
468   std::pair<int, bool> getLegalZfaFPImm(const APFloat &Imm, EVT VT) const;
469   bool isFPImmLegal(const APFloat &Imm, EVT VT,
470                     bool ForCodeSize) const override;
471   bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
472                                unsigned Index) const override;
473 
474   bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
475 
476   bool preferScalarizeSplat(SDNode *N) const override;
477 
  // Legalize half-precision FP via "soft promotion": compute in float but keep
  // values stored in the narrow half format between operations.
  bool softPromoteHalfType() const override { return true; }
479 
480   /// Return the register type for a given MVT, ensuring vectors are treated
481   /// as a series of gpr sized integers.
482   MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
483                                     EVT VT) const override;
484 
485   /// Return the number of registers for a given MVT, ensuring vectors are
486   /// treated as a series of gpr sized integers.
487   unsigned getNumRegistersForCallingConv(LLVMContext &Context,
488                                          CallingConv::ID CC,
489                                          EVT VT) const override;
490 
491   unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context,
492                                                 CallingConv::ID CC, EVT VT,
493                                                 EVT &IntermediateVT,
494                                                 unsigned &NumIntermediates,
495                                                 MVT &RegisterVT) const override;
496 
497   bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
498                                             EVT VT) const override;
499 
500   /// Return true if the given shuffle mask can be codegen'd directly, or if it
501   /// should be stack expanded.
502   bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
503 
504   bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const override {
505     // If the pair to store is a mixture of float and int values, we will
506     // save two bitwise instructions and one float-to-int instruction and
507     // increase one store instruction. There is potentially a more
508     // significant benefit because it avoids the float->int domain switch
509     // for input value. So It is more likely a win.
510     if ((LTy.isFloatingPoint() && HTy.isInteger()) ||
511         (LTy.isInteger() && HTy.isFloatingPoint()))
512       return true;
513     // If the pair only contains int values, we will save two bitwise
514     // instructions and increase one store instruction (costing one more
515     // store buffer). Since the benefit is more blurred we leave such a pair
516     // out until we get testcase to prove it is a win.
517     return false;
518   }
519 
520   bool
521   shouldExpandBuildVectorWithShuffles(EVT VT,
522                                       unsigned DefinedValues) const override;
523 
524   /// Return the cost of LMUL for linear operations.
525   InstructionCost getLMULCost(MVT VT) const;
526 
527   InstructionCost getVRGatherVVCost(MVT VT) const;
528   InstructionCost getVRGatherVICost(MVT VT) const;
529   InstructionCost getVSlideCost(MVT VT) const;
530 
531   // Provide custom lowering hooks for some operations.
532   SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
533   void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
534                           SelectionDAG &DAG) const override;
535 
536   SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
537 
538   bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
539                                     const APInt &DemandedElts,
540                                     TargetLoweringOpt &TLO) const override;
541 
542   void computeKnownBitsForTargetNode(const SDValue Op,
543                                      KnownBits &Known,
544                                      const APInt &DemandedElts,
545                                      const SelectionDAG &DAG,
546                                      unsigned Depth) const override;
547   unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
548                                            const APInt &DemandedElts,
549                                            const SelectionDAG &DAG,
550                                            unsigned Depth) const override;
551 
552   const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;
553 
554   // This method returns the name of a target specific DAG node.
555   const char *getTargetNodeName(unsigned Opcode) const override;
556 
557   MachineMemOperand::Flags
558   getTargetMMOFlags(const Instruction &I) const override;
559 
560   MachineMemOperand::Flags
561   getTargetMMOFlags(const MemSDNode &Node) const override;
562 
563   bool
564   areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX,
565                                       const MemSDNode &NodeY) const override;
566 
567   ConstraintType getConstraintType(StringRef Constraint) const override;
568 
569   InlineAsm::ConstraintCode
570   getInlineAsmMemConstraint(StringRef ConstraintCode) const override;
571 
572   std::pair<unsigned, const TargetRegisterClass *>
573   getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
574                                StringRef Constraint, MVT VT) const override;
575 
576   void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
577                                     std::vector<SDValue> &Ops,
578                                     SelectionDAG &DAG) const override;
579 
580   MachineBasicBlock *
581   EmitInstrWithCustomInserter(MachineInstr &MI,
582                               MachineBasicBlock *BB) const override;
583 
584   void AdjustInstrPostInstrSelection(MachineInstr &MI,
585                                      SDNode *Node) const override;
586 
587   EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
588                          EVT VT) const override;
589 
590   bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
591                             bool MathUsed) const override {
592     if (VT == MVT::i8 || VT == MVT::i16)
593       return false;
594 
595     return TargetLowering::shouldFormOverflowOp(Opcode, VT, MathUsed);
596   }
597 
598   bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem,
599                                     unsigned AddrSpace) const override {
600     // If we can replace 4 or more scalar stores, there will be a reduction
601     // in instructions even after we add a vector constant load.
602     return NumElem >= 4;
603   }
604 
  // Folding two setccs joined by logical operators into bitwise logic is only
  // enabled for scalar integer types.
  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  // Selects between two constants are always worth turning into arithmetic on
  // the condition bit (avoids a branch/conditional move).
  bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }
609 
610   bool isCtpopFast(EVT VT) const override;
611 
612   unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const override;
613 
  // Prefer branches that compare against zero; they map directly onto the
  // beqz/bnez forms and need no comparison register.
  bool preferZeroCompareBranch() const override { return true; }
615 
616   bool shouldInsertFencesForAtomic(const Instruction *I) const override {
617     return isa<LoadInst>(I) || isa<StoreInst>(I);
618   }
619   Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
620                                 AtomicOrdering Ord) const override;
621   Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
622                                  AtomicOrdering Ord) const override;
623 
624   bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
625                                   EVT VT) const override;
626 
  // Subword atomic RMW results are sign extended, matching the sign extension
  // the W-form instructions perform on RV64.
  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::SIGN_EXTEND;
  }
630 
  // The compare argument of a cmpxchg is sign extended so it matches the
  // extension applied to the loaded value it is compared against.
  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::SIGN_EXTEND;
  }
634 
635   bool shouldTransformSignedTruncationCheck(EVT XVT,
636                                             unsigned KeptBits) const override;
637 
638   TargetLowering::ShiftLegalizationStrategy
639   preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
640                                      unsigned ExpansionFactor) const override {
641     if (DAG.getMachineFunction().getFunction().hasMinSize())
642       return ShiftLegalizationStrategy::LowerToLibcall;
643     return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
644                                                               ExpansionFactor);
645   }
646 
647   bool isDesirableToCommuteWithShift(const SDNode *N,
648                                      CombineLevel Level) const override;
649 
650   /// If a physical register, this returns the register that receives the
651   /// exception address on entry to an EH pad.
652   Register
653   getExceptionPointerRegister(const Constant *PersonalityFn) const override;
654 
655   /// If a physical register, this returns the register that receives the
656   /// exception typeid on entry to a landing pad.
657   Register
658   getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
659 
660   bool shouldExtendTypeInLibCall(EVT Type) const override;
661   bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;
662 
663   /// Returns the register with the specified architectural or ABI name. This
664   /// method is necessary to lower the llvm.read_register.* and
665   /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
666   /// with the clang -ffixed-xX flag for access to be allowed.
667   Register getRegisterByName(const char *RegName, LLT VT,
668                              const MachineFunction &MF) const override;
669 
670   // Lower incoming arguments, copy physregs into vregs
671   SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
672                                bool IsVarArg,
673                                const SmallVectorImpl<ISD::InputArg> &Ins,
674                                const SDLoc &DL, SelectionDAG &DAG,
675                                SmallVectorImpl<SDValue> &InVals) const override;
676   bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
677                       bool IsVarArg,
678                       const SmallVectorImpl<ISD::OutputArg> &Outs,
679                       LLVMContext &Context) const override;
680   SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
681                       const SmallVectorImpl<ISD::OutputArg> &Outs,
682                       const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
683                       SelectionDAG &DAG) const override;
684   SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
685                     SmallVectorImpl<SDValue> &InVals) const override;
686 
687   bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
688                                          Type *Ty) const override;
689   bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
690   bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  // Splitting large constant GEP offsets lets the common base address be
  // reused (CSE'd) across multiple memory accesses.
  bool shouldConsiderGEPOffsetSplit() const override { return true; }
692 
693   bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
694                               SDValue C) const override;
695 
696   bool isMulAddWithConstProfitable(SDValue AddNode,
697                                    SDValue ConstNode) const override;
698 
699   TargetLowering::AtomicExpansionKind
700   shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
701   Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
702                                       Value *AlignedAddr, Value *Incr,
703                                       Value *Mask, Value *ShiftAmt,
704                                       AtomicOrdering Ord) const override;
705   TargetLowering::AtomicExpansionKind
706   shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
707   Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
708                                           AtomicCmpXchgInst *CI,
709                                           Value *AlignedAddr, Value *CmpVal,
710                                           Value *NewVal, Value *Mask,
711                                           AtomicOrdering Ord) const override;
712 
713   /// Returns true if the target allows unaligned memory accesses of the
714   /// specified type.
715   bool allowsMisalignedMemoryAccesses(
716       EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
717       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
718       unsigned *Fast = nullptr) const override;
719 
720   EVT getOptimalMemOpType(const MemOp &Op,
721                           const AttributeList &FuncAttributes) const override;
722 
723   bool splitValueIntoRegisterParts(
724       SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
725       unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
726       const override;
727 
728   SDValue joinRegisterPartsIntoValue(
729       SelectionDAG & DAG, const SDLoc &DL, const SDValue *Parts,
730       unsigned NumParts, MVT PartVT, EVT ValueVT,
731       std::optional<CallingConv::ID> CC) const override;
732 
733   // Return the value of VLMax for the given vector type (i.e. SEW and LMUL)
734   SDValue computeVLMax(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG) const;
735 
736   static RISCVII::VLMUL getLMUL(MVT VT);
737   inline static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize,
738                                       unsigned MinSize) {
739     // Original equation:
740     //   VLMAX = (VectorBits / EltSize) * LMUL
741     //   where LMUL = MinSize / RISCV::RVVBitsPerBlock
742     // The following equations have been reordered to prevent loss of precision
743     // when calculating fractional LMUL.
744     return ((VectorBits / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
745   }
746 
747   // Return inclusive (low, high) bounds on the value of VLMAX for the
748   // given scalable container type given known bounds on VLEN.
749   static std::pair<unsigned, unsigned>
750   computeVLMAXBounds(MVT ContainerVT, const RISCVSubtarget &Subtarget);
751 
752   static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
753   static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
754   static unsigned getRegClassIDForVecVT(MVT VT);
755   static std::pair<unsigned, unsigned>
756   decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT,
757                                            unsigned InsertExtractIdx,
758                                            const RISCVRegisterInfo *TRI);
759   MVT getContainerForFixedLengthVector(MVT VT) const;
760 
  bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override;

  // Return true if ScalarTy is a legal element type for RVV vectors.
  bool isLegalElementTypeForRVV(EVT ScalarTy) const;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  unsigned getJumpTableEncoding() const override;

  const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                          const MachineBasicBlock *MBB,
                                          unsigned uid,
                                          MCContext &Ctx) const override;

  bool isVScaleKnownToBeAPowerOfTwo() const override;

  // Non-override helper: compute Base/Offset for an indexed memory access;
  // IsInc reports whether the offset is added or subtracted.
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
785 
786   bool isLegalScaleForGatherScatter(uint64_t Scale,
787                                     uint64_t ElemSize) const override {
788     // Scaled addressing not supported on indexed load/stores
789     return Scale == 1;
790   }
791 
  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilderBase &IRB) const override;

  /// Returns whether or not generating an interleaved load/store intrinsic
  /// for this type will be legal.
  bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
                                    Align Alignment, unsigned AddrSpace,
                                    const DataLayout &) const;

  /// Return true if a stride load store of the given result type and
  /// alignment is legal.
  bool isLegalStridedLoadStore(EVT DataType, Align Alignment) const;

  // Interleaved accesses are supported for factors up to 8.
  unsigned getMaxSupportedInterleaveFactor() const override { return 8; }

  // Return true if GlobalISel should give up on Inst and defer to SelectionDAG.
  bool fallBackToDAGISel(const Instruction &Inst) const override;
809 
  // Lowering hooks for interleaved memory accesses, in both the
  // shuffle-based (LI/SI + shuffles) and intrinsic-based (II) forms.
  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;

  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *II,
                                        LoadInst *LI) const override;

  bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
                                       StoreInst *SI) const override;

  // KCFI (kernel control-flow integrity) check bundles are supported.
  bool supportKCFIBundles() const override { return true; }

  // Emit the machine instruction sequence for a KCFI check at MBBI.
  MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
                              MachineBasicBlock::instr_iterator &MBBI,
                              const TargetInstrInfo *TII) const override;
829 
830   /// RISCVCCAssignFn - This target-specific function extends the default
831   /// CCValAssign with additional information used to lower RISC-V calling
832   /// conventions.
833   typedef bool RISCVCCAssignFn(const DataLayout &DL, RISCVABI::ABI,
834                                unsigned ValNo, MVT ValVT, MVT LocVT,
835                                CCValAssign::LocInfo LocInfo,
836                                ISD::ArgFlagsTy ArgFlags, CCState &State,
837                                bool IsFixed, bool IsRet, Type *OrigTy,
838                                const RISCVTargetLowering &TLI,
839                                std::optional<unsigned> FirstMaskArgument);
840 
private:
  /// Run Fn over each formal argument in Ins, recording the resulting
  /// assignments in CCInfo. IsRet is forwarded to Fn.
  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
                        RISCVCCAssignFn Fn) const;
  /// Run Fn over each outgoing value in Outs, recording the resulting
  /// assignments in CCInfo.
  /// NOTE(review): CLI appears to be the call being lowered (vs. a return) —
  /// confirm whether it may be null at call sites.
  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         bool IsRet, CallLoweringInfo *CLI,
                         RISCVCCAssignFn Fn) const;
849 
  // Compute the DAG address of node N (global address, block address,
  // constant pool entry, jump table, ...). IsLocal/IsExternWeak select the
  // access pattern used.
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  // TLS address computation: static (initial/local exec, optionally via GOT)
  // and dynamic (general/local dynamic) models.
  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                           bool UseGOT) const;
  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;
856 
  // Per-node lowering helpers for scalar ops and addresses; each returns the
  // lowered replacement for Op.
  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
  // Vector lowering helpers (masks, reductions, subvectors, memory ops,
  // and fixed-length-to-RVV conversions).
  SDValue lowerSPLAT_VECTOR_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskSplat(SDValue Op, SelectionDAG &DAG) const;
  // ExtTrueVal is the integer value a true mask lane extends to.
  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                             int64_t ExtTrueVal) const;
  SDValue lowerVectorMaskTruncLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorTruncLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorFPExtendOrRoundLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskVecReduction(SDValue Op, SelectionDAG &DAG,
                                      bool IsVP) const;
  SDValue lowerFPVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_DEINTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_INTERLEAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTEP_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_REVERSE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue lowerMaskedGather(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedScatter(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
                                            SelectionDAG &DAG) const;
  // Generic and VP (vector-predicated) lowering helpers.
  SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPExtMaskOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPSetCCMaskOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPSpliceExperimental(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPReverseExperimental(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPStridedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPStridedStore(SDValue Op, SelectionDAG &DAG) const;
  // ExtendOpc selects the extension node used for the widened elements.
  SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
                                            unsigned ExtendOpc) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
920 
  SDValue lowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) const;

  // Strict-FP (exception-aware) variants of the FP lowering helpers.
  SDValue lowerStrictFPExtendOrRoundLike(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerVectorStrictFSetcc(SDValue Op, SelectionDAG &DAG) const;

  // Expand RVV loads/stores whose alignment is below what the hardware
  // supports into legal sequences.
  SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;
930 
  /// Return true if the call described by CLI may be lowered as a tail call,
  /// given the argument assignments already computed in CCInfo/ArgLocs.
  bool isEligibleForTailCallOptimization(
      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
      const SmallVector<CCValAssign, 16> &ArgLocs) const;

  /// Generate error diagnostics if any register used by CC has been marked
  /// reserved.
  void validateCCReservedRegs(
      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
      MachineFunction &MF) const;
940 
  // Return true if fixed-length vector type VT should be lowered with RVV.
  bool useRVVForFixedLengthVectorVT(MVT VT) const;

  // Type used for the explicit vector length operand of VP nodes.
  MVT getVPExplicitVectorLengthTy() const override;

  bool shouldExpandGetVectorLength(EVT TripCountVT, unsigned VF,
                                   bool IsScalable) const override;

  /// RVV code generation for fixed length vectors does not lower all
  /// BUILD_VECTORs. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR can be custom lowered for all legal vector types,
  /// this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;
955 
956   /// Disable normalizing
957   /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
958   /// select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y))
959   /// RISC-V doesn't have flags so it's better to perform the and/or in a GPR.
960   bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override {
961     return false;
962   };
963 
  /// For available scheduling models FDIV + two independent FMULs are much
  /// faster than two FDIVs.
  unsigned combineRepeatedFPDivisors() const override;

  // Build an optimized SDIV-by-power-of-2 sequence; new nodes are appended
  // to Created.
  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  bool shouldFoldSelectWithSingleBitTest(EVT VT,
                                         const APInt &AndMask) const override;

  // Minimum number of cases before a switch is lowered via a jump table.
  unsigned getMinimumJumpTableEntries() const override;
975 };
976 
namespace RISCV {

// Argument/return-value assignment for the standard RISC-V calling
// conventions; matches RISCVTargetLowering::RISCVCCAssignFn.
bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
              MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
              ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
              bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
              std::optional<unsigned> FirstMaskArgument);

// Variant of CC_RISCV used for the "fastcc" calling convention.
bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                     MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                     ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                     bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
                     std::optional<unsigned> FirstMaskArgument);

// Assignment function for the GHC (Glasgow Haskell Compiler) convention.
bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                  CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                  CCState &State);

// The GPRs used for passing integer arguments.
ArrayRef<MCPhysReg> getArgGPRs();

} // end namespace RISCV
998 
999 namespace RISCVVIntrinsicsTable {
1000 
/// Per-intrinsic record from the generated RISCVVIntrinsicsTable: identifies
/// the intrinsic and the positions of its scalar and VL operands.
struct RISCVVIntrinsicInfo {
  unsigned IntrinsicID;
  uint8_t ScalarOperand;
  uint8_t VLOperand;

  // Sentinel encodings for "no such operand"; these mirror NoScalarOperand
  // and NoVLOperand in IntrinsicsRISCV.td. Named constants replace the magic
  // numbers previously duplicated between code and comments.
  static constexpr uint8_t NoScalarOperand = 0xF;
  static constexpr uint8_t NoVLOperand = 0x1F;

  /// True when the intrinsic takes a scalar operand.
  bool hasScalarOperand() const { return ScalarOperand != NoScalarOperand; }
  /// True when the intrinsic carries an explicit VL operand.
  bool hasVLOperand() const { return VLOperand != NoVLOperand; }
};
1014 
1015 using namespace RISCV;
1016 
1017 #define GET_RISCVVIntrinsicsTable_DECL
1018 #include "RISCVGenSearchableTables.inc"
1019 #undef GET_RISCVVIntrinsicsTable_DECL
1020 
1021 } // end namespace RISCVVIntrinsicsTable
1022 
1023 } // end namespace llvm
1024 
1025 #endif
1026