//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.
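//
// As an illustration (a sketch only; the exact suffix depends on the pointer
// type and address space being specialised for), a masked 32-bit atomic add
// on a plain i32 pointer would be referred to in IR as:
//
//   declare i32 @llvm.riscv.masked.atomicrmw.add.i32.p0i32(i32*, i32, i32, i32)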

let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }

  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(...)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra operand to do sign extension with.
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
  // Unsigned min and max don't need the extra operand.
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Bitmanip (Bit Manipulation) Extension

let TargetPrefix = "riscv" in {

  class BitManipGPRIntrinsics
      : Intrinsic<[llvm_any_ty],
                  [LLVMMatchType<0>],
                  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
  class BitManipGPRGPRIntrinsics
      : Intrinsic<[llvm_any_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>],
                  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
  class BitManipGPRGPRGRIntrinsics
      : Intrinsic<[llvm_any_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

  // Zbb
  def int_riscv_orc_b : BitManipGPRIntrinsics;

  // Zbc or Zbkc
  def int_riscv_clmul  : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulh : BitManipGPRGPRIntrinsics;

  // Zbc
  def int_riscv_clmulr : BitManipGPRGPRIntrinsics;
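
  // As an illustration (a sketch, assuming XLen=64 so the polymorphic type
  // resolves to i64), a carry-less multiply would be called in IR as:
  //
  //   %r = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)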

  // Zbe
  def int_riscv_bcompress   : BitManipGPRGPRIntrinsics;
  def int_riscv_bdecompress : BitManipGPRGPRIntrinsics;

  // Zbf
  def int_riscv_bfp  : BitManipGPRGPRIntrinsics;

  // Zbp
  def int_riscv_grev  : BitManipGPRGPRIntrinsics;
  def int_riscv_gorc  : BitManipGPRGPRIntrinsics;
  def int_riscv_shfl  : BitManipGPRGPRIntrinsics;
  def int_riscv_unshfl  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_n  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_b  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_h  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_w  : BitManipGPRGPRIntrinsics;

  // Zbr
  def int_riscv_crc32_b : BitManipGPRIntrinsics;
  def int_riscv_crc32_h : BitManipGPRIntrinsics;
  def int_riscv_crc32_w : BitManipGPRIntrinsics;
  def int_riscv_crc32_d : BitManipGPRIntrinsics;
  def int_riscv_crc32c_b : BitManipGPRIntrinsics;
  def int_riscv_crc32c_h : BitManipGPRIntrinsics;
  def int_riscv_crc32c_w : BitManipGPRIntrinsics;
  def int_riscv_crc32c_d : BitManipGPRIntrinsics;

  // Zbt
  def int_riscv_fsl : BitManipGPRGPRGRIntrinsics;
  def int_riscv_fsr : BitManipGPRGPRGRIntrinsics;

  // Zbkb
  def int_riscv_brev8 : BitManipGPRIntrinsics;
  def int_riscv_zip   : BitManipGPRIntrinsics;
  def int_riscv_unzip : BitManipGPRIntrinsics;

  // Zbkx
  def int_riscv_xperm4  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm8  : BitManipGPRGPRIntrinsics;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vectors

// The intrinsic does not have any operand that must be extended.
defvar NoScalarOperand = 0xF;

// The intrinsic does not have a VL operand.
// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
defvar NoVLOperand = 0x1F;

class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> ScalarOperand = NoScalarOperand;
  bits<5> VLOperand = NoVLOperand;
}

let TargetPrefix = "riscv" in {
  // We use anyint here, but we only support XLen.
  def int_riscv_vsetvli   : Intrinsic<[llvm_anyint_ty],
                           /* AVL */  [LLVMMatchType<0>,
                           /* VSEW */  LLVMMatchType<0>,
                           /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem, IntrHasSideEffects,
                                       ImmArg<ArgIndex<1>>,
                                       ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                            /* VSEW */ [LLVMMatchType<0>,
                            /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem, IntrHasSideEffects,
                                       ImmArg<ArgIndex<0>>,
                                       ImmArg<ArgIndex<1>>]>;

  // Versions without side effects: better optimizable and usable if only the
  // returned vector length is important.
  def int_riscv_vsetvli_opt   : Intrinsic<[llvm_anyint_ty],
                               /* AVL */  [LLVMMatchType<0>,
                               /* VSEW */  LLVMMatchType<0>,
                               /* VLMUL */ LLVMMatchType<0>],
                                          [IntrNoMem,
                                           ImmArg<ArgIndex<1>>,
                                           ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax_opt : Intrinsic<[llvm_anyint_ty],
                                /* VSEW */ [LLVMMatchType<0>,
                                /* VLMUL */ LLVMMatchType<0>],
                                          [IntrNoMem,
                                           ImmArg<ArgIndex<0>>,
                                           ImmArg<ArgIndex<1>>]>;
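
  // Illustrative use (a sketch; with the standard vtype encodings, SEW=32 is
  // immediate 2 and LMUL=1 is immediate 0):
  //
  //   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 0)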
188
189  // For unit stride mask load
190  // Input: (pointer, vl)
191  class RISCVUSMLoad
192        : Intrinsic<[llvm_anyvector_ty],
193                    [LLVMPointerType<LLVMMatchType<0>>,
194                     llvm_anyint_ty],
195                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
196    let VLOperand = 1;
197  }
198  // For unit stride load
199  // Input: (passthru, pointer, vl)
200  class RISCVUSLoad
201        : Intrinsic<[llvm_anyvector_ty],
202                    [LLVMMatchType<0>,
203                     LLVMPointerType<LLVMMatchType<0>>,
204                     llvm_anyint_ty],
205                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
206    let VLOperand = 2;
207  }
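  // For instance (illustrative, assuming XLen=64 and element type i32), the
  // unit stride load created from this class (vle, defined further below)
  // would be called as:
  //
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(
  //            <vscale x 2 x i32> %passthru, <vscale x 2 x i32>* %ptr, i64 %vl)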
  // For unit stride fault-only-first load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
  // not work.
  class RISCVUSLoadFF
        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>]>,
                    RISCVVIntrinsic {
    let VLOperand = 2;
  }
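  // Illustrative IR for a fault-only-first load (a sketch; the second result
  // is the vl that was actually processed):
  //
  //   %r = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(
  //            <vscale x 2 x i32> %passthru, <vscale x 2 x i32>* %ptr, i64 %avl)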
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  class RISCVUSLoadMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
  // not work.
  class RISCVUSLoadFFMasked
        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with passthru operand
  // Input: (passthru, pointer, stride, vl)
  class RISCVSLoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl, policy)
  class RISCVSLoadMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                     LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed load with passthru operand
  // Input: (passthru, pointer, index, vl)
  class RISCVILoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  class RISCVILoadMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMasked
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMasked
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMasked
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Destination vector type is the same as the source vector.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryAAUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Destination vector type is the same as the first source vector (with mask).
  // Input: (vector_in, vector_in, mask, vl, policy)
  class RISCVUnaryAAMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Input: (passthru, vector_in, mask, vl)
  class RISCVCompress
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Destination vector type is the same as the first and second source vectors.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAAUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Destination vector type is the same as the first and second source vectors.
  // Input: (passthru, vector_in, int_vector_in, vl)
  class RISCVRGatherVVUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Destination vector type is the same as the first and second source vectors.
  // Input: (vector_in, vector_in, int_vector_in, mask, vl, policy)
  class RISCVRGatherVVMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, int16_vector_in, vl)
  class RISCVRGatherEI16VVUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Destination vector type is the same as the first and second source vectors.
  // Input: (vector_in, vector_in, int16_vector_in, mask, vl, policy)
  class RISCVRGatherEI16VVMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Destination vector type is the same as the first source vector, and the
  // second operand is an XLen scalar.
  // Input: (passthru, vector_in, xlen_in, vl)
  class RISCVGatherVXUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Destination vector type is the same as the first source vector (with mask).
  // The second operand is an XLen scalar.
  // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
  class RISCVGatherVXMasked
       : Intrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                    LLVMMatchType<1>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Destination vector type is the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
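  // For example (illustrative, assuming XLen=64), vadd with a scalar second
  // source (the .vx form) would be:
  //
  //   %v = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(
  //            <vscale x 4 x i16> %passthru, <vscale x 4 x i16> %a,
  //            i16 %b, i64 %vl)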
  // Destination vector type is the same as the first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAXMasked
       : Intrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // Destination vector type is the same as the first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAShiftUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Destination vector type is the same as the first source vector (with mask).
  // The second source operand must match the destination type or be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAShiftMasked
       : Intrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Destination vector type is NOT the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // Destination vector type is NOT the same as the first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABXMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // Destination vector type is NOT the same as the first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABShiftUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Destination vector type is NOT the same as the first source vector (with mask).
  // The second source operand must match the destination type or be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABShiftMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For binary operations with V0 as input.
  // Input: (passthru, vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryWithV0
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
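  // e.g. vadc, add-with-carry (illustrative, assuming XLen=64); the mask
  // operand is tied to V0 during instruction selection:
  //
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(
  //            <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a,
  //            <vscale x 2 x i32> %b, <vscale x 2 x i1> %carryin, i64 %vl)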
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareUnMasked
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
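  // e.g. an integer compare yielding a mask value (illustrative, XLen=64):
  //
  //   %m = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64(
  //            <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i64 %vl)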
  // For binary operations with mask type output with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVCompareMasked
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For FP classify operations.
  // Output: (bit mask type output)
  // Input: (passthru, vector_in, vl)
  class RISCVClassifyUnMasked
        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVClassifyMasked
        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For saturating binary operations.
  // The destination vector type is the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For saturating binary operations with mask.
  // The destination vector type is the same as the first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryAAXMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For saturating binary operations.
  // The destination vector type is the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAShiftUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For saturating binary operations with mask.
  // The destination vector type is the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryAAShiftMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For saturating binary operations.
  // The destination vector type is NOT the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryABShiftUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For saturating binary operations with mask.
  // The destination vector type is NOT the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryABShiftMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (vector_in, vector_in, scalar_in, vl, policy)
  class RVVSlideUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Input: (vector_in, vector_in, scalar_in, mask, vl, policy)
  class RVVSlideMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Unmasked vector multiply-add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryAAXAUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
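  // e.g. vmacc (illustrative, XLen=64; the trailing immediate is the
  // tail/mask policy operand, shown here as 0):
  //
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(
  //            <vscale x 2 x i32> %acc, <vscale x 2 x i32> %a,
  //            <vscale x 2 x i32> %b, i64 %vl, i64 0)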
  // Masked vector multiply-add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryAAXAMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Unmasked widening vector multiply-add operations; the first operand cannot
  // be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryWideUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked widening vector multiply-add operations; the first operand cannot
  // be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryWideMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // For reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask type comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
  class RISCVReductionMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  class RISCVMaskedUnarySOutUnMasked
        : Intrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  class RISCVMaskedUnarySOutMasked
        : Intrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Destination vector type is NOT the same as the source vector.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryABUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Destination vector type is NOT the same as the source vector (with mask).
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVUnaryABMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For masked unary operations with mask type in/out.
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  class RISCVMaskedUnaryMOutMasked
        : Intrinsic<[llvm_anyint_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (vl)
  class RISCVNullaryIntrinsic
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 0;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  class RISCVID
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For conversion unary operations.
  // Input: (passthru, vector_in, vl)
  class RISCVConversionUnMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVConversionMasked
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }

  // For unit stride segment load
  // Input: (passthru, pointer, vl)
  class RISCVUSSegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  class RISCVUSSegLoadMasked<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For unit stride fault-only-first segment load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
  // not work.
  class RISCVUSSegLoadFF<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [LLVMPointerToElt<0>, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
  // not work.
  class RISCVUSSegLoadFFMasked<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                     [LLVMPointerToElt<0>,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For strided segment load
  // Input: (passthru, pointer, stride, vl)
  class RISCVSSegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For strided segment load with mask
  // Input: (maskedoff, pointer, stride, mask, vl, policy)
  class RISCVSSegLoadMasked<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For indexed segment load
  // Input: (passthru, pointer, index, vl)
  class RISCVISegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  class RISCVISegLoadMasked<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<2>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For unit stride segment store
  // Input: (value, pointer, vl)
  class RISCVUSSegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl)
  class RISCVUSSegStoreMasked<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For strided segment store
  // Input: (value, pointer, stride, vl)
  class RISCVSSegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For strided segment store with mask
  // Input: (value, pointer, stride, mask, vl)
  class RISCVSSegStoreMasked<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For indexed segment store
  // Input: (value, pointer, index, vl)
  class RISCVISegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl)
  class RISCVISegStoreMasked<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked;
  }
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked;
  }

  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked;
  }
  // AAX means the destination type (A) is the same as the first source
  // type (A). X means any type for the second source operand.
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
  }
  // Like RISCVBinaryAAX, but the second operand is used as a shift amount so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryAAShift {
    def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked;
  }
  multiclass RISCVRGatherVV {
    def "int_riscv_" # NAME : RISCVRGatherVVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked;
  }
  multiclass RISCVRGatherVX {
    def "int_riscv_" # NAME : RISCVGatherVXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked;
  }
  multiclass RISCVRGatherEI16VV {
    def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked;
  }
  // ABX means the destination type (A) is different from the first source
  // type (B). X means any type for the second source operand.
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked;
  }
  // Like RISCVBinaryABX, but the second operand is used as a shift amount so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryABShift {
    def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked;
  }
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
  }
  multiclass RISCVSaturatingBinaryAAShift {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMasked;
  }
  multiclass RISCVSaturatingBinaryABShift {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMasked;
  }
  multiclass RVVSlide {
    def "int_riscv_" # NAME : RVVSlideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
  }
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked;
  }
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked;
  }
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked;
  }
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMasked;
  }
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked;
  }
  multiclass RISCVMaskedUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked;
  }
  multiclass RISCVMaskedUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked;
  }
  multiclass RISCVConversion {
    def "int_riscv_" # NAME : RISCVConversionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked;
  }
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked<nf>;
  }
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked<nf>;
  }
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked<nf>;
  }
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked<nf>;
  }
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked<nf>;
  }
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked<nf>;
  }
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked<nf>;
  }
1177
1178  defm vle : RISCVUSLoad;
1179  defm vleff : RISCVUSLoadFF;
1180  defm vse : RISCVUSStore;
1181  defm vlse: RISCVSLoad;
1182  defm vsse: RISCVSStore;
1183  defm vluxei : RISCVILoad;
1184  defm vloxei : RISCVILoad;
1185  defm vsoxei : RISCVIStore;
1186  defm vsuxei : RISCVIStore;

  def int_riscv_vlm : RISCVUSMLoad;
  def int_riscv_vsm : RISCVUSStore;

  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;

  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;

  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;

  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;

  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  defm vsll : RISCVBinaryAAShift;
  defm vsrl : RISCVBinaryAAShift;
  defm vsra : RISCVBinaryAAShift;

  defm vnsrl : RISCVBinaryABShift;
  defm vnsra : RISCVBinaryABShift;

  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;
  defm vmsgeu : RISCVCompare;
  defm vmsge : RISCVCompare;

  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  defm vwmaccu  : RISCVTernaryWide;
  defm vwmacc   : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;

  defm vfadd : RISCVBinaryAAX;
  defm vfsub : RISCVBinaryAAX;
  defm vfrsub : RISCVBinaryAAX;

  defm vfwadd : RISCVBinaryABX;
  defm vfwsub : RISCVBinaryABX;
  defm vfwadd_w : RISCVBinaryAAX;
  defm vfwsub_w : RISCVBinaryAAX;

  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  defm vmerge : RISCVBinaryWithV0;

  // Output: (vector)
  // Input: (passthru, vector_in, vl)
  def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
                                    [LLVMMatchType<0>, LLVMMatchType<0>,
                                     llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
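  // An illustrative instantiation of the above:
  //   <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(passthru, src, vl)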
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                     llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
                                    [llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                     llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
                                     [llvm_anyfloat_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
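  // The element-extract forms (vmv_x_s, vfmv_f_s) read element 0 of the
  // source vector and take neither a mask nor a VL operand; the element-insert
  // forms (vmv_s_x, vfmv_s_f) take a passthru and a VL operand instead.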

  defm vfmul : RISCVBinaryAAX;
  defm vfdiv : RISCVBinaryAAX;
  defm vfrdiv : RISCVBinaryAAX;

  defm vfwmul : RISCVBinaryABX;

  defm vfmacc : RISCVTernaryAAXA;
  defm vfnmacc : RISCVTernaryAAXA;
  defm vfmsac : RISCVTernaryAAXA;
  defm vfnmsac : RISCVTernaryAAXA;
  defm vfmadd : RISCVTernaryAAXA;
  defm vfnmadd : RISCVTernaryAAXA;
  defm vfmsub : RISCVTernaryAAXA;
  defm vfnmsub : RISCVTernaryAAXA;

  defm vfwmacc : RISCVTernaryWide;
  defm vfwnmacc : RISCVTernaryWide;
  defm vfwmsac : RISCVTernaryWide;
  defm vfwnmsac : RISCVTernaryWide;

  defm vfsqrt : RISCVUnaryAA;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAA;

  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  defm vfclass : RISCVClassify;

  defm vfmerge : RISCVBinaryWithV0;

  defm vslideup : RVVSlide;
  defm vslidedown : RVVSlide;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  defm vrgather_vv : RISCVRGatherVV;
  defm vrgather_vx : RISCVRGatherVX;
  defm vrgatherei16_vv : RISCVRGatherEI16VV;

  def "int_riscv_vcompress" : RISCVCompress;

  defm vaaddu : RISCVSaturatingBinaryAAX;
  defm vaadd : RISCVSaturatingBinaryAAX;
  defm vasubu : RISCVSaturatingBinaryAAX;
  defm vasub : RISCVSaturatingBinaryAAX;

  defm vsmul : RISCVSaturatingBinaryAAX;

  defm vssrl : RISCVSaturatingBinaryAAShift;
  defm vssra : RISCVSaturatingBinaryAAShift;

  defm vnclipu : RISCVSaturatingBinaryABShift;
  defm vnclip : RISCVSaturatingBinaryABShift;

  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;

  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  defm vfredosum : RISCVReduction;
  defm vfredusum : RISCVReduction;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  defm vfwredusum : RISCVReduction;
  defm vfwredosum : RISCVReduction;

  def int_riscv_vmand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmandn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmorn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;
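  // These mask-register operations act on whole mask vectors and are not
  // themselves maskable; an illustrative instance:
  //   <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i64(op1, op2, vl)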

  defm vcpop : RISCVMaskedUnarySOut;
  defm vfirst : RISCVMaskedUnarySOut;
  defm vmsbf : RISCVMaskedUnaryMOut;
  defm vmsof : RISCVMaskedUnaryMOut;
  defm vmsif : RISCVMaskedUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversion;
  defm vfcvt_x_f_v : RISCVConversion;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversion;
  defm vfcvt_f_x_v : RISCVConversion;

  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversion;
  defm vfwcvt_x_f_v : RISCVConversion;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;

  defm vfncvt_f_xu_w : RISCVConversion;
  defm vfncvt_f_x_w : RISCVConversion;
  defm vfncvt_xu_f_w : RISCVConversion;
  defm vfncvt_x_f_w : RISCVConversion;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversion;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (passthru, mask type input, vl)
  def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
                                  [LLVMMatchType<0>,
                                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                   llvm_anyint_ty],
                                  [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl, policy)
  def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
                                       [LLVMMatchType<0>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        llvm_anyint_ty, LLVMMatchType<1>],
                                       [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
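  // The trailing policy operand on the masked form selects the tail/mask
  // policy (the encoding used by RISCVII); ImmArg requires it to be a
  // compile-time constant.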
  // Output: (vector)
  // Input: (passthru, vl)
  def int_riscv_vid : RISCVID;

  // Output: (vector)
  // Input: (maskedoff, mask, vl, policy)
  def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
                                     [LLVMMatchType<0>,
                                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                      llvm_anyint_ty, LLVMMatchType<1>],
                                     [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }
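  // For nf = 2, for example, this expands to int_riscv_vlseg2 and
  // int_riscv_vlseg2_mask, int_riscv_vlseg2ff and int_riscv_vlseg2ff_mask,
  // int_riscv_vlsseg2, int_riscv_vloxseg2, and so on up to nf = 8.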

  // Strided loads/stores for fixed vectors.
  def int_riscv_masked_strided_load
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyptr_ty,
                     llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>;
  def int_riscv_masked_strided_store
        : Intrinsic<[],
                    [llvm_anyvector_ty, llvm_anyptr_ty,
                     llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
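  // An illustrative instance for a fixed-length vector (the stride is a byte
  // offset between consecutive elements); the exact type suffixes depend on
  // the overloads:
  //   <4 x i32> @llvm.riscv.masked.strided.load.v4i32.p0.i64(passthru, ptr,
  //                                                          stride, mask)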

  // Segment loads for fixed vectors.
  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    def int_riscv_seg # nf # _load
          : Intrinsic<!listconcat([llvm_anyvector_ty],
                                  !listsplat(LLVMMatchType<0>, !add(nf, -1))),
                      [llvm_anyptr_ty, llvm_anyint_ty],
                      [NoCapture<ArgIndex<0>>, IntrReadMem]>;
  }
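  // e.g. for nf = 2 (illustrative; each member of the returned aggregate is
  // one segment):
  //   {<4 x i32>, <4 x i32>} @llvm.riscv.seg2.load.v4i32.p0.i64(ptr, vl)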

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Scalar Cryptography
//
// These intrinsics lower directly to the corresponding instructions added by
// the scalar cryptography extension, when that extension is present.

let TargetPrefix = "riscv" in {

class ScalarCryptoGprIntrinsicAny
    : Intrinsic<[llvm_anyint_ty],
                [LLVMMatchType<0>],
                [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoByteSelect32
    : Intrinsic<[llvm_i32_ty],
                [llvm_i32_ty, llvm_i32_ty, llvm_i8_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable,
                 ImmArg<ArgIndex<2>>]>;

class ScalarCryptoGprGprIntrinsic32
    : Intrinsic<[llvm_i32_ty],
                [llvm_i32_ty, llvm_i32_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;

class ScalarCryptoGprGprIntrinsic64
    : Intrinsic<[llvm_i64_ty],
                [llvm_i64_ty, llvm_i64_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;

class ScalarCryptoGprIntrinsic64
    : Intrinsic<[llvm_i64_ty],
                [llvm_i64_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;

class ScalarCryptoByteSelectAny
    : Intrinsic<[llvm_anyint_ty],
                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i8_ty],
                [IntrNoMem, IntrSpeculatable, IntrWillReturn,
                 ImmArg<ArgIndex<2>>]>;
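// The ByteSelect classes model instructions with a 2-bit byte-select field,
// so their final i8 operand must be an immediate (ImmArg) and is expected to
// lie in [0, 3]; an illustrative use:
//   i32 @llvm.riscv.aes32esi(i32 %rs1, i32 %rs2, i8 1)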

// Zknd
def int_riscv_aes32dsi  : ScalarCryptoByteSelect32;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32;

def int_riscv_aes64ds   : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64dsm  : ScalarCryptoGprGprIntrinsic64;

def int_riscv_aes64im   : ScalarCryptoGprIntrinsic64;

// Zkne
def int_riscv_aes32esi  : ScalarCryptoByteSelect32;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32;

def int_riscv_aes64es   : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64esm  : ScalarCryptoGprGprIntrinsic64;

// Zknd & Zkne
def int_riscv_aes64ks2  : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64ks1i : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
                                    [IntrNoMem, IntrSpeculatable,
                                     IntrWillReturn, ImmArg<ArgIndex<1>>]>;
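// Note: per the scalar cryptography specification, the rnum immediate of
// aes64ks1i is only defined for values 0 through 10.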

// Zknh
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;

def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32;

def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64;

// Zksed
def int_riscv_sm4ks      : ScalarCryptoByteSelectAny;
def int_riscv_sm4ed      : ScalarCryptoByteSelectAny;

// Zksh
def int_riscv_sm3p0      : ScalarCryptoGprIntrinsicAny;
def int_riscv_sm3p1      : ScalarCryptoGprIntrinsicAny;
} // TargetPrefix = "riscv"