//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.

let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }

  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(...)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra operand to do sign extension with.
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
  // Unsigned min and max don't need the extra operand.
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
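
  // Illustrative sketch (not normative): after pointer-type mangling, the i32
  // xchg variant is expected to surface in IR roughly as
  //   declare i32 @llvm.riscv.masked.atomicrmw.xchg.i32.p0i32(
  //       i32* %ptr, i32 %val, i32 %mask, i32 immarg %ordering)
  // and the i32 cmpxchg variant roughly as
  //   declare i32 @llvm.riscv.masked.cmpxchg.i32.p0i32(
  //       i32* %ptr, i32 %cmp, i32 %new, i32 %mask, i32 immarg %ordering)
  // The operand names and the `p0i32` suffix above are assumptions for
  // illustration; the suffix depends on the pointee type and address space.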

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Bitmanip (Bit Manipulation) Extension

let TargetPrefix = "riscv" in {

  class BitManipGPRIntrinsics
      : Intrinsic<[llvm_any_ty],
                  [LLVMMatchType<0>],
                  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
  class BitManipGPRGPRIntrinsics
      : Intrinsic<[llvm_any_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>],
                  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
  class BitManipGPRGPRGRIntrinsics
      : Intrinsic<[llvm_any_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

  // Zbb
  def int_riscv_orc_b : BitManipGPRIntrinsics;

  // Zbc or Zbkc
  def int_riscv_clmul  : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulh : BitManipGPRGPRIntrinsics;

  // Zbc
  def int_riscv_clmulr : BitManipGPRGPRIntrinsics;

  // Zbe
  def int_riscv_bcompress   : BitManipGPRGPRIntrinsics;
  def int_riscv_bdecompress : BitManipGPRGPRIntrinsics;

  // Zbf
  def int_riscv_bfp  : BitManipGPRGPRIntrinsics;

  // Zbp
  def int_riscv_grev  : BitManipGPRGPRIntrinsics;
  def int_riscv_gorc  : BitManipGPRGPRIntrinsics;
  def int_riscv_shfl  : BitManipGPRGPRIntrinsics;
  def int_riscv_unshfl  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_n  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_b  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_h  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_w  : BitManipGPRGPRIntrinsics;

  // Zbr
  def int_riscv_crc32_b : BitManipGPRIntrinsics;
  def int_riscv_crc32_h : BitManipGPRIntrinsics;
  def int_riscv_crc32_w : BitManipGPRIntrinsics;
  def int_riscv_crc32_d : BitManipGPRIntrinsics;
  def int_riscv_crc32c_b : BitManipGPRIntrinsics;
  def int_riscv_crc32c_h : BitManipGPRIntrinsics;
  def int_riscv_crc32c_w : BitManipGPRIntrinsics;
  def int_riscv_crc32c_d : BitManipGPRIntrinsics;

  // Zbt
  def int_riscv_fsl : BitManipGPRGPRGRIntrinsics;
  def int_riscv_fsr : BitManipGPRGPRGRIntrinsics;

  // Zbkb
  def int_riscv_brev8 : BitManipGPRIntrinsics;
  def int_riscv_zip   : BitManipGPRIntrinsics;
  def int_riscv_unzip : BitManipGPRIntrinsics;

  // Zbkx
  def int_riscv_xperm4  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm8  : BitManipGPRGPRIntrinsics;
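
  // Illustrative sketch (not normative): these are overloaded on the GPR
  // type, so on RV64 they are expected to surface in IR roughly as
  //   declare i64 @llvm.riscv.orc.b.i64(i64)
  //   declare i64 @llvm.riscv.clmul.i64(i64, i64)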
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vectors

// The intrinsic does not have any operand that must be extended.
defvar NoSplatOperand = 0xF;

// The intrinsic does not have a VL operand.
// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
defvar NoVLOperand = 0x1F;

class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> SplatOperand = NoSplatOperand;
  bits<5> VLOperand = NoVLOperand;
}

let TargetPrefix = "riscv" in {
  // We use anyint here, but only XLen is supported.
  def int_riscv_vsetvli   : Intrinsic<[llvm_anyint_ty],
                           /* AVL */  [LLVMMatchType<0>,
                           /* VSEW */  LLVMMatchType<0>,
                           /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem, IntrHasSideEffects,
                                       ImmArg<ArgIndex<1>>,
                                       ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                            /* VSEW */ [LLVMMatchType<0>,
                            /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem, IntrHasSideEffects,
                                       ImmArg<ArgIndex<0>>,
                                       ImmArg<ArgIndex<1>>]>;

  // Versions without side effects: these are easier to optimize, and are
  // usable when only the returned vector length is important.
  def int_riscv_vsetvli_opt   : Intrinsic<[llvm_anyint_ty],
                               /* AVL */  [LLVMMatchType<0>,
                               /* VSEW */  LLVMMatchType<0>,
                               /* VLMUL */ LLVMMatchType<0>],
                                          [IntrNoMem,
                                           ImmArg<ArgIndex<1>>,
                                           ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax_opt : Intrinsic<[llvm_anyint_ty],
                                /* VSEW */ [LLVMMatchType<0>,
                                /* VLMUL */ LLVMMatchType<0>],
                                          [IntrNoMem,
                                           ImmArg<ArgIndex<0>>,
                                           ImmArg<ArgIndex<1>>]>;
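
  // Illustrative sketch (not normative): on RV64 the overloaded type resolves
  // to i64, so int_riscv_vsetvli above is expected to surface in IR roughly as
  //   declare i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 immarg %sew,
  //                                       i64 immarg %lmul)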

  // For unit stride mask load
  // Input: (pointer, vl)
  class RISCVUSMLoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unit stride load
  // Input: (passthru, pointer, vl)
  class RISCVUSLoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride fault-only-first load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. The combination of IntrReadMem and IntrHasSideEffects
  // does not work.
  class RISCVUSLoadFF
        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>]>,
                    RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl, ta)
  class RISCVUSLoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 3;
  }
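
  // Illustrative sketch (not normative): instantiated for <vscale x 2 x i32>
  // with an i64 VL on RV64, the unit stride load classes above are expected
  // to surface in IR roughly as
  //   declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(
  //       <vscale x 2 x i32> %passthru, <vscale x 2 x i32>* %ptr, i64 %vl)
  //   declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(
  //       <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %ptr,
  //       <vscale x 2 x i1> %mask, i64 %vl, i64 immarg %ta)
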
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl, ta)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. The combination of IntrReadMem and IntrHasSideEffects
  // does not work.
  class RISCVUSLoadFFMask
        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with passthru operand
  // Input: (passthru, pointer, stride, vl)
  class RISCVSLoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl, ta)
  class RISCVSLoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                     LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed load with passthru operand
  // Input: (passthru, pointer, index, vl)
  class RISCVILoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl, ta)
  class RISCVILoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
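
  // Illustrative sketch (not normative): for <vscale x 2 x i32> on RV64 the
  // unit stride store classes above are expected to surface in IR roughly as
  //   declare void @llvm.riscv.vse.nxv2i32.i64(
  //       <vscale x 2 x i32> %value, <vscale x 2 x i32>* %ptr, i64 %vl)
  //   declare void @llvm.riscv.vse.mask.nxv2i32.i64(
  //       <vscale x 2 x i32> %value, <vscale x 2 x i32>* %ptr,
  //       <vscale x 2 x i1> %mask, i64 %vl)
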
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the source vector.
  // Input: (vector_in, vl)
  class RISCVUnaryAANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // The destination vector type is the same as the first source vector (with
  // mask).
  // Input: (maskedoff, vector_in, mask, vl, ta)
  class RISCVUnaryAAMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  class RISCVUnaryAAMaskNoTA
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, int_vector_in, vl)
  class RISCVRGatherVVNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, int_vector_in, vl, ta)
  class RISCVRGatherVVMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (vector_in, int16_vector_in, vl)
  class RISCVRGatherEI16VVNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, int16_vector_in, vl, ta)
  class RISCVRGatherEI16VVMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector, and
  // the second operand is an XLen scalar.
  // Input: (vector_in, xlen_in, vl)
  class RISCVGatherVXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the first source vector (with
  // mask). The second operand is an XLen scalar.
  // Input: (maskedoff, vector_in, xlen_in, mask, vl, ta)
  class RISCVGatherVXMask
       : Intrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                    LLVMMatchType<1>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 1;
    let VLOperand = 2;
  }
  // The destination vector type is the same as the first source vector (with
  // mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  class RISCVBinaryAAXMask
       : Intrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
    let VLOperand = 4;
  }
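
  // Illustrative sketch (not normative): `vadd` below is built from these two
  // classes; for <vscale x 2 x i32> on RV64 the unmasked form is expected to
  // surface in IR roughly as
  //   declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(
  //       <vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl)
  // where the second source may instead be a scalar to be splatted (the .vx
  // form), as indicated by SplatOperand = 1.
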
  // The destination vector type is the same as the first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the first source vector (with
  // mask). The second source operand must match the destination type or be an
  // XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  class RISCVBinaryAAShiftMask
       : Intrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 1;
    let VLOperand = 2;
  }
  // The destination vector type is NOT the same as the first source vector
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  class RISCVBinaryABXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is NOT the same as the first source vector
  // (with mask). The second source operand must match the destination type or
  // be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  class RISCVBinaryABShiftMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For binary operations with V0 as input.
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryWithV0
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 1;
    let VLOperand = 3;
  }
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 1;
    let VLOperand = 3;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareNoMask
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVCompareMask
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
    let VLOperand = 4;
  }
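
  // Illustrative sketch (not normative): `vmseq` below is built from the two
  // compare classes; for <vscale x 2 x i32> on RV64 the unmasked form is
  // expected to surface in IR roughly as
  //   declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64(
  //       <vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl)
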
  // For FP classify operations.
  // Output: (bit mask type output)
  // Input: (vector_in, vl)
  class RISCVClassifyNoMask
        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVClassifyMask
        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For saturating binary operations.
  // The destination vector type is the same as the first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let SplatOperand = 1;
    let VLOperand = 2;
  }
  // For saturating binary operations with mask.
  // The destination vector type is the same as the first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  class RISCVSaturatingBinaryAAXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let SplatOperand = 2;
    let VLOperand = 4;
  }
  // For saturating binary operations.
  // The destination vector type is the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For saturating binary operations with mask.
  // The destination vector type is the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  class RISCVSaturatingBinaryAAShiftMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For saturating binary operations.
  // The destination vector type is NOT the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryABShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For saturating binary operations with mask.
  // The destination vector type is NOT the same as the first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, ta)
  class RISCVSaturatingBinaryABShiftMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  class RISCVTernaryAAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  class RISCVTernaryAAAXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  class RISCVTernaryAAXANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 1;
    let VLOperand = 3;
  }
  class RISCVTernaryAAXAMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 1;
    let VLOperand = 4;
  }
  class RISCVTernaryWideNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 1;
    let VLOperand = 3;
  }
  class RISCVTernaryWideMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 1;
    let VLOperand = 4;
  }
  // For reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask type comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
  class RISCVReductionMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  class RISCVMaskUnarySOutNoMask
        : Intrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  class RISCVMaskUnarySOutMask
        : Intrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is NOT the same as the source vector.
  // Input: (vector_in, vl)
  class RISCVUnaryABNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // The destination vector type is NOT the same as the source vector (with
  // mask).
  // Input: (maskedoff, vector_in, mask, vl, ta)
  class RISCVUnaryABMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  class RISCVMaskUnaryMOutMask
        : Intrinsic<[llvm_anyint_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (vl)
  class RISCVNullaryIntrinsic
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 0;
  }
  // For conversion unary operations.
  // Input: (vector_in, vl)
  class RISCVConversionNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl, ta)
  class RISCVConversionMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }

  // For unit stride segment load
  // Input: (pointer, vl)
  class RISCVUSSegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl, ta)
  class RISCVUSSegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
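
  // Illustrative sketch (not normative): with nf = 2, the unmasked class
  // above instantiated for <vscale x 2 x i32> on RV64 is expected to surface
  // in IR roughly as
  //   declare { <vscale x 2 x i32>, <vscale x 2 x i32> }
  //       @llvm.riscv.vlseg2.nxv2i32.i64(i32* %ptr, i64 %vl)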

  // For unit stride fault-only-first segment load
  // Input: (pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. The combination of IntrReadMem and IntrHasSideEffects
  // does not work.
  class RISCVUSSegLoadFF<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    [LLVMPointerToElt<0>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl, ta)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. The combination of IntrReadMem and IntrHasSideEffects
  // does not work.
  class RISCVUSSegLoadFFMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                     [LLVMPointerToElt<0>,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For strided segment load
  // Input: (pointer, offset, vl)
  class RISCVSSegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For strided segment load with mask
  // Input: (maskedoff, pointer, offset, mask, vl, ta)
  class RISCVSSegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For indexed segment load
  // Input: (pointer, index, vl)
  class RISCVISegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl, ta)
  class RISCVISegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<2>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For unit stride segment store
  // Input: (value, pointer, vl)
  class RISCVUSSegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl)
  class RISCVUSSegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For strided segment store
  // Input: (value, pointer, offset, vl)
  class RISCVSSegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For strided segment store with mask
  // Input: (value, pointer, offset, mask, vl)
  class RISCVSSegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For indexed segment store
  // Input: (value, pointer, index, vl)
  class RISCVISegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl)
  class RISCVISegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
  }
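
  // For instance, `defm vle : RISCVUSLoad;` further below expands to the
  // records int_riscv_vle and int_riscv_vle_mask, which become the IR
  // intrinsics llvm.riscv.vle and llvm.riscv.vle.mask.
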
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMask;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
  }

  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAANoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
  }
  // AAX means the destination type (A) is the same as the first source
  // type (A). X means any type for the second source operand.
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
  }
  // Like RISCVBinaryAAX, but the second operand is used as a shift amount so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryAAShift {
    def "int_riscv_" # NAME : RISCVBinaryAAShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMask;
  }
  multiclass RISCVRGatherVV {
    def "int_riscv_" # NAME : RISCVRGatherVVNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMask;
  }
  multiclass RISCVRGatherVX {
    def "int_riscv_" # NAME : RISCVGatherVXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMask;
  }
  multiclass RISCVRGatherEI16VV {
    def "int_riscv_" # NAME : RISCVRGatherEI16VVNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMask;
  }
  // ABX means the destination type (A) is different from the first source
  // type (B). X means any type for the second source operand.
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
  }
  // Like RISCVBinaryABX, but the second operand is used as a shift amount so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryABShift {
    def "int_riscv_" # NAME : RISCVBinaryABShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMask;
  }
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
  }
  multiclass RISCVSaturatingBinaryAAShift {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMask;
  }
  multiclass RISCVSaturatingBinaryABShift {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMask;
  }
  multiclass RISCVTernaryAAAX {
    def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
  }
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
  }
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
  }
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
  }
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
  }
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
  }
  multiclass RISCVMaskUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
  }
  multiclass RISCVMaskUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
  }
  multiclass RISCVConversion {
    def "int_riscv_" # NAME : RISCVConversionNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
  }
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
  }
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMask<nf>;
  }
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
  }
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMask<nf>;
  }
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
  }
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
  }
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask<nf>;
  }

  defm vle : RISCVUSLoad;
  defm vleff : RISCVUSLoadFF;
  defm vse : RISCVUSStore;
  defm vlse : RISCVSLoad;
  defm vsse : RISCVSStore;
  defm vluxei : RISCVILoad;
  defm vloxei : RISCVILoad;
  defm vsoxei : RISCVIStore;
  defm vsuxei : RISCVIStore;

  def int_riscv_vlm : RISCVUSMLoad;
  def int_riscv_vsm : RISCVUSStore;

  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;

  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;

  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;

  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;

  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  defm vsll : RISCVBinaryAAShift;
  defm vsrl : RISCVBinaryAAShift;
  defm vsra : RISCVBinaryAAShift;

  defm vnsrl : RISCVBinaryABShift;
  defm vnsra : RISCVBinaryABShift;

  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;
  defm vmsgeu : RISCVCompare;
  defm vmsge : RISCVCompare;

  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  defm vwmaccu  : RISCVTernaryWide;
  defm vwmacc   : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;

  defm vfadd : RISCVBinaryAAX;
  defm vfsub : RISCVBinaryAAX;
  defm vfrsub : RISCVBinaryAAX;

  defm vfwadd : RISCVBinaryABX;
  defm vfwsub : RISCVBinaryABX;
  defm vfwadd_w : RISCVBinaryAAX;
  defm vfwsub_w : RISCVBinaryAAX;

  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  defm vmerge : RISCVBinaryWithV0;

  def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
                                    [LLVMMatchType<0>, llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMVectorElementType<0>, llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMVectorElementType<0>, llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
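
  // Illustrative sketch (not normative): for <vscale x 2 x i32> on RV64,
  // int_riscv_vmv_v_x above is expected to surface in IR roughly as
  //   declare <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(
  //       i32 %scalar, i64 %vl)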

  def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
                                    [llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                     llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
                                     [llvm_anyfloat_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  defm vfmul : RISCVBinaryAAX;
  defm vfdiv : RISCVBinaryAAX;
  defm vfrdiv : RISCVBinaryAAX;

  defm vfwmul : RISCVBinaryABX;

  defm vfmacc : RISCVTernaryAAXA;
  defm vfnmacc : RISCVTernaryAAXA;
  defm vfmsac : RISCVTernaryAAXA;
  defm vfnmsac : RISCVTernaryAAXA;
  defm vfmadd : RISCVTernaryAAXA;
  defm vfnmadd : RISCVTernaryAAXA;
  defm vfmsub : RISCVTernaryAAXA;
  defm vfnmsub : RISCVTernaryAAXA;

  defm vfwmacc : RISCVTernaryWide;
  defm vfwnmacc : RISCVTernaryWide;
  defm vfwmsac : RISCVTernaryWide;
  defm vfwnmsac : RISCVTernaryWide;

  defm vfsqrt : RISCVUnaryAA;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAA;

  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  defm vfclass : RISCVClassify;

  defm vfmerge : RISCVBinaryWithV0;

  defm vslideup : RISCVTernaryAAAX;
  defm vslidedown : RISCVTernaryAAAX;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  defm vrgather_vv : RISCVRGatherVV;
  defm vrgather_vx : RISCVRGatherVX;
  defm vrgatherei16_vv : RISCVRGatherEI16VV;

  def int_riscv_vcompress : RISCVUnaryAAMaskNoTA;

  defm vaaddu : RISCVSaturatingBinaryAAX;
  defm vaadd : RISCVSaturatingBinaryAAX;
  defm vasubu : RISCVSaturatingBinaryAAX;
  defm vasub : RISCVSaturatingBinaryAAX;

  defm vsmul : RISCVSaturatingBinaryAAX;

  defm vssrl : RISCVSaturatingBinaryAAShift;
  defm vssra : RISCVSaturatingBinaryAAShift;

  defm vnclipu : RISCVSaturatingBinaryABShift;
  defm vnclip : RISCVSaturatingBinaryABShift;

  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;

  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  defm vfredosum : RISCVReduction;
  defm vfredusum : RISCVReduction;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  defm vfwredusum : RISCVReduction;
  defm vfwredosum : RISCVReduction;

  def int_riscv_vmand : RISCVBinaryAAANoMask;
  def int_riscv_vmnand : RISCVBinaryAAANoMask;
  def int_riscv_vmandn : RISCVBinaryAAANoMask;
  def int_riscv_vmxor : RISCVBinaryAAANoMask;
  def int_riscv_vmor : RISCVBinaryAAANoMask;
  def int_riscv_vmnor : RISCVBinaryAAANoMask;
  def int_riscv_vmorn : RISCVBinaryAAANoMask;
  def int_riscv_vmxnor : RISCVBinaryAAANoMask;
  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;

  defm vcpop : RISCVMaskUnarySOut;
  defm vfirst : RISCVMaskUnarySOut;
  defm vmsbf : RISCVMaskUnaryMOut;
  defm vmsof : RISCVMaskUnaryMOut;
  defm vmsif : RISCVMaskUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversion;
  defm vfcvt_x_f_v : RISCVConversion;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversion;
  defm vfcvt_f_x_v : RISCVConversion;

  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversion;
  defm vfwcvt_x_f_v : RISCVConversion;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;

  defm vfncvt_f_xu_w : RISCVConversion;
  defm vfncvt_f_x_w : RISCVConversion;
  defm vfncvt_xu_f_w : RISCVConversion;
  defm vfncvt_x_f_w : RISCVConversion;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversion;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (mask type input, vl)
  def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
                                  [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                   llvm_anyint_ty],
                                  [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl)
  def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
                                       [LLVMMatchType<0>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        llvm_anyint_ty],
                                       [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
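
  // Illustrative calls (type suffixes are schematic; the actual mangled name
  // is derived from the overloaded result and VL types):
  //   %r = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i64(
  //            <vscale x 4 x i1> %mask, i64 %vl)
  //   %r = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(
  //            <vscale x 4 x i32> %maskedoff, <vscale x 4 x i1> %in,
  //            <vscale x 4 x i1> %mask, i64 %vl)
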
  // Output: (vector)
  // Input: (vl)
  def int_riscv_vid : RISCVNullaryIntrinsic;

  // Output: (vector)
  // Input: (maskedoff, mask, vl)
  def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
                                     [LLVMMatchType<0>,
                                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
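
  // Illustrative call (suffixes schematic, as above):
  //   %r = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i64(i64 %vl)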

  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }
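
  // For nf = 2, for example, the loop above instantiates int_riscv_vlseg2,
  // int_riscv_vlseg2ff, int_riscv_vlsseg2, int_riscv_vloxseg2,
  // int_riscv_vluxseg2, int_riscv_vsseg2, int_riscv_vssseg2,
  // int_riscv_vsoxseg2 and int_riscv_vsuxseg2, along with whatever masked
  // variants the referenced multiclasses provide.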

  // Strided loads/stores for fixed vectors.
  def int_riscv_masked_strided_load
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyptr_ty,
                     llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>;
  def int_riscv_masked_strided_store
        : Intrinsic<[],
                    [llvm_anyvector_ty, llvm_anyptr_ty,
                     llvm_anyint_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
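
  // A schematic use of the strided load (types and mangled suffixes are
  // illustrative only):
  //   %v = call <4 x i32> @llvm.riscv.masked.strided.load.v4i32.p0i32.i64(
  //            <4 x i32> %passthru, i32* %ptr, i64 %stride, <4 x i1> %mask)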
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Scalar Cryptography
//
// These intrinsics lower directly to the corresponding instructions added by
// the scalar cryptography extension, when that extension is present.

let TargetPrefix = "riscv" in {

class ScalarCryptoGprIntrinsicAny
    : Intrinsic<[llvm_anyint_ty],
                [LLVMMatchType<0>],
                [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoByteSelect32
    : Intrinsic<[llvm_i32_ty],
                [llvm_i32_ty, llvm_i32_ty, llvm_i8_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable,
                 ImmArg<ArgIndex<2>>]>;

class ScalarCryptoGprGprIntrinsic32
    : Intrinsic<[llvm_i32_ty],
                [llvm_i32_ty, llvm_i32_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;

class ScalarCryptoGprGprIntrinsic64
    : Intrinsic<[llvm_i64_ty],
                [llvm_i64_ty, llvm_i64_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;

class ScalarCryptoGprIntrinsic64
    : Intrinsic<[llvm_i64_ty],
                [llvm_i64_ty],
                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;

class ScalarCryptoByteSelectAny
    : Intrinsic<[llvm_anyint_ty],
                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i8_ty],
                [IntrNoMem, IntrSpeculatable, IntrWillReturn,
                 ImmArg<ArgIndex<2>>]>;
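
// For reference, ScalarCryptoByteSelect32 describes intrinsics of the form
//   i32 @llvm.riscv.<name>(i32 rs1, i32 rs2, i8 imm bs);
// and ScalarCryptoGprGprIntrinsic64 describes
//   i64 @llvm.riscv.<name>(i64 rs1, i64 rs2);
// (operand names are illustrative).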

// Zknd
def int_riscv_aes32dsi  : ScalarCryptoByteSelect32;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32;

def int_riscv_aes64ds   : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64dsm  : ScalarCryptoGprGprIntrinsic64;

def int_riscv_aes64im   : ScalarCryptoGprIntrinsic64;

// Zkne
def int_riscv_aes32esi  : ScalarCryptoByteSelect32;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32;

def int_riscv_aes64es   : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64esm  : ScalarCryptoGprGprIntrinsic64;

// Zknd & Zkne
def int_riscv_aes64ks2  : ScalarCryptoGprGprIntrinsic64;
def int_riscv_aes64ks1i : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
                                    [IntrNoMem, IntrSpeculatable,
                                     IntrWillReturn, ImmArg<ArgIndex<1>>]>;
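// The i32 operand of aes64ks1i is the key-schedule round number; the ImmArg
// attribute requires it to be a compile-time constant.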

// Zknh
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsicAny;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsicAny;

def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32;

def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64;

// Zksed
def int_riscv_sm4ks      : ScalarCryptoByteSelectAny;
def int_riscv_sm4ed      : ScalarCryptoByteSelectAny;

// Zksh
def int_riscv_sm3p0      : ScalarCryptoGprIntrinsicAny;
def int_riscv_sm3p1      : ScalarCryptoGprIntrinsicAny;
} // TargetPrefix = "riscv"
