1//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines all of the RISCV-specific intrinsics.
10//
11//===----------------------------------------------------------------------===//
12
13//===----------------------------------------------------------------------===//
14// Atomics
15
16// Atomic Intrinsics have multiple versions for different access widths, which
17// all follow one of the following signatures (depending on how many arguments
18// they require). We carefully instantiate only specific versions of these for
19// specific integer widths, rather than using `llvm_anyint_ty`.
20//
21// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
22// canonical names, and the intrinsics used in the code will have a name
23// suffixed with the pointer type they are specialised for (denoted `<p>` in the
24// names below), in order to avoid type conflicts.
25
26let TargetPrefix = "riscv" in {
27
28  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
29  class MaskedAtomicRMWFourArg<LLVMType itype>
30      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
31                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
32  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
33  class MaskedAtomicRMWFiveArg<LLVMType itype>
34      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
35                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;
36
37  // We define 32-bit and 64-bit variants of the above, where T stands for i32
38  // or i64 respectively:
39  multiclass MaskedAtomicRMWFourArgIntrinsics {
40    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
41    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
42    // i64 @llvm.<name>.i32.<p>(any*, i64, i64, i64 imm);
43    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
44  }
45
46  multiclass MaskedAtomicRMWFiveArgIntrinsics {
47    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
48    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
49    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
50    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
51  }
52
53  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(...)
54  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
55  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
56  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
57  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
58  // Signed min and max need an extra operand to do sign extension with.
59  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
60  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
61  // Unsigned min and max don't need the extra operand.
62  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
63  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
64
65  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
66  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
67
68} // TargetPrefix = "riscv"
69
70//===----------------------------------------------------------------------===//
71// Vectors
72
73class RISCVVIntrinsic {
74  // These intrinsics may accept illegal integer values in their llvm_any_ty
75  // operand, so they have to be extended. If set to zero then the intrinsic
76  // does not have any operand that must be extended.
77  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
78  bits<4> ExtendOperand = 0;
79}
80
81let TargetPrefix = "riscv" in {
82  // We use anyint here but we only support XLen.
83  def int_riscv_vsetvli   : Intrinsic<[llvm_anyint_ty],
84                           /* AVL */  [LLVMMatchType<0>,
85                           /* VSEW */  LLVMMatchType<0>,
86                           /* VLMUL */ LLVMMatchType<0>],
87                                      [IntrNoMem, IntrHasSideEffects,
88                                       ImmArg<ArgIndex<1>>,
89                                       ImmArg<ArgIndex<2>>]>;
90  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
91                            /* VSEW */ [LLVMMatchType<0>,
92                            /* VLMUL */ LLVMMatchType<0>],
93                                      [IntrNoMem, IntrHasSideEffects,
94                                       ImmArg<ArgIndex<0>>,
95                                       ImmArg<ArgIndex<1>>]>;
96
97  // For unit stride load
98  // Input: (pointer, vl)
99  class RISCVUSLoad
100        : Intrinsic<[llvm_anyvector_ty],
101                    [LLVMPointerType<LLVMMatchType<0>>,
102                     llvm_anyint_ty],
103                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
104  // For unit stride fault-only-first load
105  // Input: (pointer, vl)
106  // Output: (data, vl)
107  // NOTE: We model this with default memory properties since we model writing
108  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
109  class RISCVUSLoadFF
110        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
111                    [LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
112                    [NoCapture<ArgIndex<0>>]>,
113                    RISCVVIntrinsic;
114  // For unit stride load with mask
115  // Input: (maskedoff, pointer, mask, vl)
116  class RISCVUSLoadMask
117        : Intrinsic<[llvm_anyvector_ty ],
118                    [LLVMMatchType<0>,
119                     LLVMPointerType<LLVMMatchType<0>>,
120                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
121                     llvm_anyint_ty],
122                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
123  // For unit stride fault-only-first load with mask
124  // Input: (maskedoff, pointer, mask, vl)
125  // Output: (data, vl)
126  // NOTE: We model this with default memory properties since we model writing
127  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
128  class RISCVUSLoadFFMask
129        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
130                    [LLVMMatchType<0>,
131                     LLVMPointerType<LLVMMatchType<0>>,
132                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
133                     LLVMMatchType<1>],
134                    [NoCapture<ArgIndex<1>>]>, RISCVVIntrinsic;
135  // For strided load
136  // Input: (pointer, stride, vl)
137  class RISCVSLoad
138        : Intrinsic<[llvm_anyvector_ty],
139                    [LLVMPointerType<LLVMMatchType<0>>,
140                     llvm_anyint_ty, LLVMMatchType<1>],
141                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
142  // For strided load with mask
143  // Input: (maskedoff, pointer, stride, mask, vl)
144  class RISCVSLoadMask
145        : Intrinsic<[llvm_anyvector_ty ],
146                    [LLVMMatchType<0>,
147                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
148                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
149                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
150  // For indexed load
151  // Input: (pointer, index, vl)
152  class RISCVILoad
153        : Intrinsic<[llvm_anyvector_ty],
154                    [LLVMPointerType<LLVMMatchType<0>>,
155                     llvm_anyvector_ty, llvm_anyint_ty],
156                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
157  // For indexed load with mask
158  // Input: (maskedoff, pointer, index, mask, vl)
159  class RISCVILoadMask
160        : Intrinsic<[llvm_anyvector_ty ],
161                    [LLVMMatchType<0>,
162                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
163                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
164                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
165  // For unit stride store
166  // Input: (vector_in, pointer, vl)
167  class RISCVUSStore
168        : Intrinsic<[],
169                    [llvm_anyvector_ty,
170                     LLVMPointerType<LLVMMatchType<0>>,
171                     llvm_anyint_ty],
172                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
173  // For unit stride store with mask
174  // Input: (vector_in, pointer, mask, vl)
175  class RISCVUSStoreMask
176        : Intrinsic<[],
177                    [llvm_anyvector_ty,
178                     LLVMPointerType<LLVMMatchType<0>>,
179                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
180                     llvm_anyint_ty],
181                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
182  // For strided store
183  // Input: (vector_in, pointer, stride, vl)
184  class RISCVSStore
185        : Intrinsic<[],
186                    [llvm_anyvector_ty,
187                     LLVMPointerType<LLVMMatchType<0>>,
188                     llvm_anyint_ty, LLVMMatchType<1>],
189                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
190  // For stride store with mask
191  // Input: (vector_in, pointer, stirde, mask, vl)
192  class RISCVSStoreMask
193        : Intrinsic<[],
194                    [llvm_anyvector_ty,
195                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
196                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
197                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
198  // For indexed store
199  // Input: (vector_in, pointer, index, vl)
200  class RISCVIStore
201        : Intrinsic<[],
202                    [llvm_anyvector_ty,
203                     LLVMPointerType<LLVMMatchType<0>>,
204                     llvm_anyint_ty, llvm_anyint_ty],
205                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
206  // For indexed store with mask
207  // Input: (vector_in, pointer, index, mask, vl)
208  class RISCVIStoreMask
209        : Intrinsic<[],
210                    [llvm_anyvector_ty,
211                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
212                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
213                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
214  // For destination vector type is the same as source vector.
215  // Input: (vector_in, vl)
216  class RISCVUnaryAANoMask
217        : Intrinsic<[llvm_anyvector_ty],
218                    [LLVMMatchType<0>, llvm_anyint_ty],
219                    [IntrNoMem]>, RISCVVIntrinsic;
220  // For destination vector type is the same as first source vector (with mask).
221  // Input: (vector_in, mask, vl)
222  class RISCVUnaryAAMask
223        : Intrinsic<[llvm_anyvector_ty],
224                    [LLVMMatchType<0>, LLVMMatchType<0>,
225                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
226                    [IntrNoMem]>, RISCVVIntrinsic;
227  // For destination vector type is the same as first and second source vector.
228  // Input: (vector_in, vector_in, vl)
229  class RISCVBinaryAAANoMask
230        : Intrinsic<[llvm_anyvector_ty],
231                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
232                    [IntrNoMem]>, RISCVVIntrinsic;
233  // For destination vector type is the same as first and second source vector.
234  // Input: (vector_in, vector_in, vl)
235  class RISCVBinaryAAAMask
236        : Intrinsic<[llvm_anyvector_ty],
237                    [LLVMMatchType<0>, LLVMMatchType<0>,
238                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
239                    [IntrNoMem]>, RISCVVIntrinsic;
240  // For destination vector type is the same as first source vector.
241  // Input: (vector_in, vector_in/scalar_in, vl)
242  class RISCVBinaryAAXNoMask
243        : Intrinsic<[llvm_anyvector_ty],
244                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
245                    [IntrNoMem]>, RISCVVIntrinsic {
246    let ExtendOperand = 2;
247  }
248  // For destination vector type is the same as first source vector (with mask).
249  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
250  class RISCVBinaryAAXMask
251       : Intrinsic<[llvm_anyvector_ty],
252                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
253                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
254                   [IntrNoMem]>, RISCVVIntrinsic {
255    let ExtendOperand = 3;
256  }
257  // For destination vector type is NOT the same as first source vector.
258  // Input: (vector_in, vector_in/scalar_in, vl)
259  class RISCVBinaryABXNoMask
260        : Intrinsic<[llvm_anyvector_ty],
261                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
262                    [IntrNoMem]>, RISCVVIntrinsic {
263    let ExtendOperand = 2;
264  }
265  // For destination vector type is NOT the same as first source vector (with mask).
266  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
267  class RISCVBinaryABXMask
268        : Intrinsic<[llvm_anyvector_ty],
269                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
270                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
271                    [IntrNoMem]>, RISCVVIntrinsic {
272    let ExtendOperand = 3;
273  }
274  // For binary operations with V0 as input.
275  // Input: (vector_in, vector_in/scalar_in, V0, vl)
276  class RISCVBinaryWithV0
277        : Intrinsic<[llvm_anyvector_ty],
278                    [LLVMMatchType<0>, llvm_any_ty,
279                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
280                     llvm_anyint_ty],
281                    [IntrNoMem]>, RISCVVIntrinsic {
282    let ExtendOperand = 2;
283  }
284  // For binary operations with mask type output and V0 as input.
285  // Output: (mask type output)
286  // Input: (vector_in, vector_in/scalar_in, V0, vl)
287  class RISCVBinaryMOutWithV0
288        :Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
289                   [llvm_anyvector_ty, llvm_any_ty,
290                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
291                    llvm_anyint_ty],
292                   [IntrNoMem]>, RISCVVIntrinsic {
293    let ExtendOperand = 2;
294  }
295  // For binary operations with mask type output.
296  // Output: (mask type output)
297  // Input: (vector_in, vector_in/scalar_in, vl)
298  class RISCVBinaryMOut
299        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
300                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
301                    [IntrNoMem]>, RISCVVIntrinsic {
302    let ExtendOperand = 2;
303  }
304  // For binary operations with mask type output without mask.
305  // Output: (mask type output)
306  // Input: (vector_in, vector_in/scalar_in, vl)
307  class RISCVCompareNoMask
308        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
309                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
310                    [IntrNoMem]>, RISCVVIntrinsic {
311    let ExtendOperand = 2;
312  }
313  // For binary operations with mask type output with mask.
314  // Output: (mask type output)
315  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
316  class RISCVCompareMask
317        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
318                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
319                     llvm_anyvector_ty, llvm_any_ty,
320                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
321                    [IntrNoMem]>, RISCVVIntrinsic {
322    let ExtendOperand = 3;
323  }
324  // For FP classify operations.
325  // Output: (bit mask type output)
326  // Input: (vector_in, vl)
327  class RISCVClassifyNoMask
328        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
329                    [llvm_anyvector_ty, llvm_anyint_ty],
330                    [IntrNoMem]>, RISCVVIntrinsic;
331  // For FP classify operations with mask.
332  // Output: (bit mask type output)
333  // Input: (maskedoff, vector_in, mask, vl)
334  class RISCVClassifyMask
335        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
336                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
337                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
338                    [IntrNoMem]>, RISCVVIntrinsic;
339  // For Saturating binary operations.
340  // The destination vector type is the same as first source vector.
341  // Input: (vector_in, vector_in/scalar_in, vl)
342  class RISCVSaturatingBinaryAAXNoMask
343        : Intrinsic<[llvm_anyvector_ty],
344                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
345                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
346    let ExtendOperand = 2;
347  }
348  // For Saturating binary operations with mask.
349  // The destination vector type is the same as first source vector.
350  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
351  class RISCVSaturatingBinaryAAXMask
352        : Intrinsic<[llvm_anyvector_ty],
353                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
354                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
355                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
356    let ExtendOperand = 3;
357  }
358  // For Saturating binary operations.
359  // The destination vector type is NOT the same as first source vector.
360  // Input: (vector_in, vector_in/scalar_in, vl)
361  class RISCVSaturatingBinaryABXNoMask
362        : Intrinsic<[llvm_anyvector_ty],
363                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
364                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
365    let ExtendOperand = 2;
366  }
367  // For Saturating binary operations with mask.
368  // The destination vector type is NOT the same as first source vector (with mask).
369  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
370  class RISCVSaturatingBinaryABXMask
371        : Intrinsic<[llvm_anyvector_ty],
372                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
373                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
374                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
375    let ExtendOperand = 3;
376  }
377  class RISCVTernaryAAAXNoMask
378        : Intrinsic<[llvm_anyvector_ty],
379                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
380                     LLVMMatchType<1>],
381                    [IntrNoMem]>, RISCVVIntrinsic;
382  class RISCVTernaryAAAXMask
383        : Intrinsic<[llvm_anyvector_ty],
384                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
385                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
386                    [IntrNoMem]>, RISCVVIntrinsic;
387  class RISCVTernaryAAXANoMask
388        : Intrinsic<[llvm_anyvector_ty],
389                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
390                     llvm_anyint_ty],
391                    [IntrNoMem]>, RISCVVIntrinsic {
392    let ExtendOperand = 2;
393  }
394  class RISCVTernaryAAXAMask
395        : Intrinsic<[llvm_anyvector_ty],
396                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
397                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
398                    [IntrNoMem]>, RISCVVIntrinsic {
399    let ExtendOperand = 2;
400  }
401  class RISCVTernaryWideNoMask
402        : Intrinsic< [llvm_anyvector_ty],
403                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
404                      llvm_anyint_ty],
405                     [IntrNoMem] >, RISCVVIntrinsic {
406    let ExtendOperand = 2;
407  }
408  class RISCVTernaryWideMask
409        : Intrinsic< [llvm_anyvector_ty],
410                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
411                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
412                     [IntrNoMem]>, RISCVVIntrinsic {
413    let ExtendOperand = 2;
414  }
415  // For Reduction ternary operations.
416  // For destination vector type is the same as first and third source vector.
417  // Input: (vector_in, vector_in, vector_in, vl)
418  class RISCVReductionNoMask
419        : Intrinsic<[llvm_anyvector_ty],
420                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
421                     llvm_anyint_ty],
422                    [IntrNoMem]>, RISCVVIntrinsic;
423  // For Reduction ternary operations with mask.
424  // For destination vector type is the same as first and third source vector.
425  // The mask type come from second source vector.
426  // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
427  class RISCVReductionMask
428        : Intrinsic<[llvm_anyvector_ty],
429                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
430                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
431                    [IntrNoMem]>, RISCVVIntrinsic;
432  // For unary operations with scalar type output without mask
433  // Output: (scalar type)
434  // Input: (vector_in, vl)
435  class RISCVMaskUnarySOutNoMask
436        : Intrinsic<[llvm_anyint_ty],
437                    [llvm_anyvector_ty, LLVMMatchType<0>],
438                    [IntrNoMem]>, RISCVVIntrinsic;
439  // For unary operations with scalar type output with mask
440  // Output: (scalar type)
441  // Input: (vector_in, mask, vl)
442  class RISCVMaskUnarySOutMask
443        : Intrinsic<[llvm_anyint_ty],
444                    [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<0>],
445                    [IntrNoMem]>, RISCVVIntrinsic;
446  // For destination vector type is NOT the same as source vector.
447  // Input: (vector_in, vl)
448  class RISCVUnaryABNoMask
449        : Intrinsic<[llvm_anyvector_ty],
450                    [llvm_anyvector_ty, llvm_anyint_ty],
451                    [IntrNoMem]>, RISCVVIntrinsic;
452  // For destination vector type is NOT the same as source vector (with mask).
453  // Input: (maskedoff, vector_in, mask, vl)
454  class RISCVUnaryABMask
455        : Intrinsic<[llvm_anyvector_ty],
456                    [LLVMMatchType<0>, llvm_anyvector_ty,
457                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
458                     llvm_anyint_ty],
459                    [IntrNoMem]>, RISCVVIntrinsic;
460  // For unary operations with the same vector type in/out without mask
461  // Output: (vector)
462  // Input: (vector_in, vl)
463  class RISCVUnaryNoMask
464        : Intrinsic<[llvm_anyvector_ty],
465                    [LLVMMatchType<0>, llvm_anyint_ty],
466                    [IntrNoMem]>, RISCVVIntrinsic;
467  // For mask unary operations with mask type in/out with mask
468  // Output: (mask type output)
469  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
470  class RISCVMaskUnaryMOutMask
471        : Intrinsic<[llvm_anyint_ty],
472                    [LLVMMatchType<0>, LLVMMatchType<0>,
473                     LLVMMatchType<0>, llvm_anyint_ty],
474                    [IntrNoMem]>, RISCVVIntrinsic;
475  // Output: (vector)
476  // Input: (vl)
477  class RISCVNullaryIntrinsic
478        : Intrinsic<[llvm_anyvector_ty],
479                    [llvm_anyint_ty],
480                    [IntrNoMem]>, RISCVVIntrinsic;
481  // For Conversion unary operations.
482  // Input: (vector_in, vl)
483  class RISCVConversionNoMask
484        : Intrinsic<[llvm_anyvector_ty],
485                    [llvm_anyvector_ty, llvm_anyint_ty],
486                    [IntrNoMem]>, RISCVVIntrinsic;
487  // For Conversion unary operations with mask.
488  // Input: (maskedoff, vector_in, mask, vl)
489  class RISCVConversionMask
490        : Intrinsic<[llvm_anyvector_ty],
491                    [LLVMMatchType<0>, llvm_anyvector_ty,
492                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
493                    [IntrNoMem]>, RISCVVIntrinsic;
494  // For atomic operations without mask
495  // Input: (base, index, value, vl)
496  class RISCVAMONoMask
497        : Intrinsic<[llvm_anyvector_ty],
498                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, LLVMMatchType<0>,
499                     llvm_anyint_ty],
500                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
501  // For atomic operations with mask
502  // Input: (base, index, value, mask, vl)
503  class RISCVAMOMask
504        : Intrinsic<[llvm_anyvector_ty],
505                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, LLVMMatchType<0>,
506                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
507                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
508
509  // For unit stride segment load
510  // Input: (pointer, vl)
511  class RISCVUSSegLoad<int nf>
512        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
513                                !add(nf, -1))),
514                    [LLVMPointerToElt<0>, llvm_anyint_ty],
515                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
516  // For unit stride segment load with mask
517  // Input: (maskedoff, pointer, mask, vl)
518  class RISCVUSSegLoadMask<int nf>
519        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
520                                !add(nf, -1))),
521                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
522                                [LLVMPointerToElt<0>,
523                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
524                                 llvm_anyint_ty]),
525                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
526
527  // For unit stride fault-only-first segment load
528  // Input: (pointer, vl)
529  // Output: (data, vl)
530  // NOTE: We model this with default memory properties since we model writing
531  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
532  class RISCVUSSegLoadFF<int nf>
533        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
534                                !add(nf, -1)), [llvm_anyint_ty]),
535                    [LLVMPointerToElt<0>, LLVMMatchType<1>],
536                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
537  // For unit stride fault-only-first segment load with mask
538  // Input: (maskedoff, pointer, mask, vl)
539  // Output: (data, vl)
540  // NOTE: We model this with default memory properties since we model writing
541  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
542  class RISCVUSSegLoadFFMask<int nf>
543        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
544                                !add(nf, -1)), [llvm_anyint_ty]),
545                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
546                     [LLVMPointerToElt<0>,
547                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
548                      LLVMMatchType<1>]),
549                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic;
550
551  // For stride segment load
552  // Input: (pointer, offset, vl)
553  class RISCVSSegLoad<int nf>
554        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
555                                !add(nf, -1))),
556                    [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
557                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
558  // For stride segment load with mask
559  // Input: (maskedoff, pointer, offset, mask, vl)
560  class RISCVSSegLoadMask<int nf>
561        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
562                                !add(nf, -1))),
563                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
564                                [LLVMPointerToElt<0>,
565                                 llvm_anyint_ty,
566                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
567                                 LLVMMatchType<1>]),
568                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
569
570  // For indexed segment load
571  // Input: (pointer, index, vl)
572  class RISCVISegLoad<int nf>
573        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
574                                !add(nf, -1))),
575                    [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
576                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
577  // For indexed segment load with mask
578  // Input: (maskedoff, pointer, index, mask, vl)
579  class RISCVISegLoadMask<int nf>
580        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
581                                !add(nf, -1))),
582                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
583                                [LLVMPointerToElt<0>,
584                                 llvm_anyvector_ty,
585                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
586                                 llvm_anyint_ty]),
587                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
588
589  // For unit stride segment store
590  // Input: (value, pointer, vl)
591  class RISCVUSSegStore<int nf>
592        : Intrinsic<[],
593                    !listconcat([llvm_anyvector_ty],
594                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
595                                [LLVMPointerToElt<0>, llvm_anyint_ty]),
596                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
597  // For unit stride segment store with mask
598  // Input: (value, pointer, mask, vl)
599  class RISCVUSSegStoreMask<int nf>
600        : Intrinsic<[],
601                    !listconcat([llvm_anyvector_ty],
602                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
603                                [LLVMPointerToElt<0>,
604                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
605                                 llvm_anyint_ty]),
606                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
607
608  // For stride segment store
609  // Input: (value, pointer, offset, vl)
610  class RISCVSSegStore<int nf>
611        : Intrinsic<[],
612                    !listconcat([llvm_anyvector_ty],
613                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
614                                [LLVMPointerToElt<0>, llvm_anyint_ty,
615                                 LLVMMatchType<1>]),
616                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
617  // For stride segment store with mask
618  // Input: (value, pointer, offset, mask, vl)
619  class RISCVSSegStoreMask<int nf>
620        : Intrinsic<[],
621                    !listconcat([llvm_anyvector_ty],
622                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
623                                [LLVMPointerToElt<0>, llvm_anyint_ty,
624                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
625                                 LLVMMatchType<1>]),
626                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
627
628  // For indexed segment store
629  // Input: (value, pointer, offset, vl)
630  class RISCVISegStore<int nf>
631        : Intrinsic<[],
632                    !listconcat([llvm_anyvector_ty],
633                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
634                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
635                                 llvm_anyint_ty]),
636                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
637  // For indexed segment store with mask
638  // Input: (value, pointer, offset, mask, vl)
639  class RISCVISegStoreMask<int nf>
640        : Intrinsic<[],
641                    !listconcat([llvm_anyvector_ty],
642                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
643                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
644                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
645                                 llvm_anyint_ty]),
646                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
647
648  multiclass RISCVUSLoad {
649    def "int_riscv_" # NAME : RISCVUSLoad;
650    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
651  }
652  multiclass RISCVUSLoadFF {
653    def "int_riscv_" # NAME : RISCVUSLoadFF;
654    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMask;
655  }
656  multiclass RISCVSLoad {
657    def "int_riscv_" # NAME : RISCVSLoad;
658    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
659  }
660  multiclass RISCVILoad {
661    def "int_riscv_" # NAME : RISCVILoad;
662    def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
663  }
664  multiclass RISCVUSStore {
665    def "int_riscv_" # NAME : RISCVUSStore;
666    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
667  }
668  multiclass RISCVSStore {
669    def "int_riscv_" # NAME : RISCVSStore;
670    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
671  }
672
673  multiclass RISCVIStore {
674    def "int_riscv_" # NAME : RISCVIStore;
675    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
676  }
677  multiclass RISCVUnaryAA {
678    def "int_riscv_" # NAME : RISCVUnaryAANoMask;
679    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
680  }
681  multiclass RISCVUnaryAB {
682    def "int_riscv_" # NAME : RISCVUnaryABNoMask;
683    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
684  }
685  // AAX means the destination type(A) is the same as the first source
686  // type(A). X means any type for the second source operand.
687  multiclass RISCVBinaryAAX {
688    def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
689    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
690  }
691  // ABX means the destination type(A) is different from the first source
692  // type(B). X means any type for the second source operand.
693  multiclass RISCVBinaryABX {
694    def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
695    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
696  }
697  multiclass RISCVBinaryWithV0 {
698    def "int_riscv_" # NAME : RISCVBinaryWithV0;
699  }
700  multiclass RISCVBinaryMaskOutWithV0 {
701    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
702  }
703  multiclass RISCVBinaryMaskOut {
704    def "int_riscv_" # NAME : RISCVBinaryMOut;
705  }
706  multiclass RISCVSaturatingBinaryAAX {
707    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
708    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
709  }
710  multiclass RISCVSaturatingBinaryABX {
711    def "int_riscv_" # NAME : RISCVSaturatingBinaryABXNoMask;
712    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABXMask;
713  }
714  multiclass RISCVTernaryAAAX {
715    def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
716    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
717  }
718  multiclass RISCVTernaryAAXA {
719    def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
720    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
721  }
722  multiclass RISCVCompare {
723    def "int_riscv_" # NAME : RISCVCompareNoMask;
724    def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
725  }
726  multiclass RISCVClassify {
727    def "int_riscv_" # NAME : RISCVClassifyNoMask;
728    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
729  }
730  multiclass RISCVTernaryWide {
731    def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
732    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
733  }
734  multiclass RISCVReduction {
735    def "int_riscv_" # NAME : RISCVReductionNoMask;
736    def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
737  }
738  multiclass RISCVMaskUnarySOut {
739    def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
740    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
741  }
742  multiclass RISCVMaskUnaryMOut {
743    def "int_riscv_" # NAME : RISCVUnaryNoMask;
744    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
745  }
746  multiclass RISCVConversion {
747    def "int_riscv_" #NAME :RISCVConversionNoMask;
748    def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
749  }
750  multiclass RISCVAMO {
751    def "int_riscv_" # NAME : RISCVAMONoMask;
752    def "int_riscv_" # NAME # "_mask" : RISCVAMOMask;
753  }
754  multiclass RISCVUSSegLoad<int nf> {
755    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
756    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
757  }
758  multiclass RISCVUSSegLoadFF<int nf> {
759    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
760    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMask<nf>;
761  }
762  multiclass RISCVSSegLoad<int nf> {
763    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
764    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
765  }
766  multiclass RISCVISegLoad<int nf> {
767    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
768    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMask<nf>;
769  }
770  multiclass RISCVUSSegStore<int nf> {
771    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
772    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
773  }
774  multiclass RISCVSSegStore<int nf> {
775    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
776    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
777  }
778  multiclass RISCVISegStore<int nf> {
779    def "int_riscv_" # NAME : RISCVISegStore<nf>;
780    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask<nf>;
781  }
782
783  defm vle : RISCVUSLoad;
784  defm vleff : RISCVUSLoadFF;
785  defm vse : RISCVUSStore;
786  defm vlse: RISCVSLoad;
787  defm vsse: RISCVSStore;
788  defm vluxei : RISCVILoad;
789  defm vloxei : RISCVILoad;
790  defm vsoxei : RISCVIStore;
791  defm vsuxei : RISCVIStore;
792
793  def int_riscv_vle1 : RISCVUSLoad;
794  def int_riscv_vse1 : RISCVUSStore;
795
796  defm vamoswap : RISCVAMO;
797  defm vamoadd : RISCVAMO;
798  defm vamoxor : RISCVAMO;
799  defm vamoand : RISCVAMO;
800  defm vamoor : RISCVAMO;
801  defm vamomin : RISCVAMO;
802  defm vamomax : RISCVAMO;
803  defm vamominu : RISCVAMO;
804  defm vamomaxu : RISCVAMO;
805
806  defm vadd : RISCVBinaryAAX;
807  defm vsub : RISCVBinaryAAX;
808  defm vrsub : RISCVBinaryAAX;
809
810  defm vwaddu : RISCVBinaryABX;
811  defm vwadd : RISCVBinaryABX;
812  defm vwaddu_w : RISCVBinaryAAX;
813  defm vwadd_w : RISCVBinaryAAX;
814  defm vwsubu : RISCVBinaryABX;
815  defm vwsub : RISCVBinaryABX;
816  defm vwsubu_w : RISCVBinaryAAX;
817  defm vwsub_w : RISCVBinaryAAX;
818
819  defm vzext : RISCVUnaryAB;
820  defm vsext : RISCVUnaryAB;
821
822  defm vadc : RISCVBinaryWithV0;
823  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
824  defm vmadc : RISCVBinaryMaskOut;
825
826  defm vsbc : RISCVBinaryWithV0;
827  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
828  defm vmsbc : RISCVBinaryMaskOut;
829
830  defm vand : RISCVBinaryAAX;
831  defm vor : RISCVBinaryAAX;
832  defm vxor : RISCVBinaryAAX;
833
834  defm vsll : RISCVBinaryAAX;
835  defm vsrl : RISCVBinaryAAX;
836  defm vsra : RISCVBinaryAAX;
837
838  defm vnsrl : RISCVBinaryABX;
839  defm vnsra : RISCVBinaryABX;
840
841  defm vmseq : RISCVCompare;
842  defm vmsne : RISCVCompare;
843  defm vmsltu : RISCVCompare;
844  defm vmslt : RISCVCompare;
845  defm vmsleu : RISCVCompare;
846  defm vmsle : RISCVCompare;
847  defm vmsgtu : RISCVCompare;
848  defm vmsgt : RISCVCompare;
849
850  defm vminu : RISCVBinaryAAX;
851  defm vmin : RISCVBinaryAAX;
852  defm vmaxu : RISCVBinaryAAX;
853  defm vmax : RISCVBinaryAAX;
854
855  defm vmul : RISCVBinaryAAX;
856  defm vmulh : RISCVBinaryAAX;
857  defm vmulhu : RISCVBinaryAAX;
858  defm vmulhsu : RISCVBinaryAAX;
859
860  defm vdivu : RISCVBinaryAAX;
861  defm vdiv : RISCVBinaryAAX;
862  defm vremu : RISCVBinaryAAX;
863  defm vrem : RISCVBinaryAAX;
864
865  defm vwmul : RISCVBinaryABX;
866  defm vwmulu : RISCVBinaryABX;
867  defm vwmulsu : RISCVBinaryABX;
868
869  defm vmacc : RISCVTernaryAAXA;
870  defm vnmsac : RISCVTernaryAAXA;
871  defm vmadd : RISCVTernaryAAXA;
872  defm vnmsub : RISCVTernaryAAXA;
873
874  defm vwmaccu  : RISCVTernaryWide;
875  defm vwmacc   : RISCVTernaryWide;
876  defm vwmaccus : RISCVTernaryWide;
877  defm vwmaccsu : RISCVTernaryWide;
878
879  defm vfadd : RISCVBinaryAAX;
880  defm vfsub : RISCVBinaryAAX;
881  defm vfrsub : RISCVBinaryAAX;
882
883  defm vfwadd : RISCVBinaryABX;
884  defm vfwsub : RISCVBinaryABX;
885  defm vfwadd_w : RISCVBinaryAAX;
886  defm vfwsub_w : RISCVBinaryAAX;
887
888  defm vsaddu : RISCVSaturatingBinaryAAX;
889  defm vsadd : RISCVSaturatingBinaryAAX;
890  defm vssubu : RISCVSaturatingBinaryAAX;
891  defm vssub : RISCVSaturatingBinaryAAX;
892
893  def int_riscv_vmerge : RISCVBinaryWithV0;
894
895  def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
896                                    [LLVMMatchType<0>, llvm_anyint_ty],
897                                    [IntrNoMem]>, RISCVVIntrinsic;
898  def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
899                                    [LLVMVectorElementType<0>, llvm_anyint_ty],
900                                    [IntrNoMem]>, RISCVVIntrinsic {
901    let ExtendOperand = 1;
902  }
903  def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
904                                     [LLVMVectorElementType<0>, llvm_anyint_ty],
905                                     [IntrNoMem]>, RISCVVIntrinsic;
906
907  def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
908                                    [llvm_anyint_ty],
909                                    [IntrNoMem]>, RISCVVIntrinsic;
910  def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
911                                    [LLVMMatchType<0>, LLVMVectorElementType<0>,
912                                     llvm_anyint_ty],
913                                    [IntrNoMem]>, RISCVVIntrinsic {
914    let ExtendOperand = 2;
915  }
916
917  def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
918                                     [llvm_anyfloat_ty],
919                                     [IntrNoMem]>, RISCVVIntrinsic;
920  def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
921                                     [LLVMMatchType<0>, LLVMVectorElementType<0>,
922                                      llvm_anyint_ty],
923                                     [IntrNoMem]>, RISCVVIntrinsic;
924
925  defm vfmul : RISCVBinaryAAX;
926  defm vfdiv : RISCVBinaryAAX;
927  defm vfrdiv : RISCVBinaryAAX;
928
929  defm vfwmul : RISCVBinaryABX;
930
931  defm vfmacc : RISCVTernaryAAXA;
932  defm vfnmacc : RISCVTernaryAAXA;
933  defm vfmsac : RISCVTernaryAAXA;
934  defm vfnmsac : RISCVTernaryAAXA;
935  defm vfmadd : RISCVTernaryAAXA;
936  defm vfnmadd : RISCVTernaryAAXA;
937  defm vfmsub : RISCVTernaryAAXA;
938  defm vfnmsub : RISCVTernaryAAXA;
939
940  defm vfwmacc : RISCVTernaryWide;
941  defm vfwnmacc : RISCVTernaryWide;
942  defm vfwmsac : RISCVTernaryWide;
943  defm vfwnmsac : RISCVTernaryWide;
944
945  defm vfsqrt : RISCVUnaryAA;
946  defm vfrsqrt7 : RISCVUnaryAA;
947  defm vfrec7 : RISCVUnaryAA;
948
949  defm vfmin : RISCVBinaryAAX;
950  defm vfmax : RISCVBinaryAAX;
951
952  defm vfsgnj : RISCVBinaryAAX;
953  defm vfsgnjn : RISCVBinaryAAX;
954  defm vfsgnjx : RISCVBinaryAAX;
955
956  defm vfclass : RISCVClassify;
957
958  defm vfmerge : RISCVBinaryWithV0;
959
960  defm vslideup : RISCVTernaryAAAX;
961  defm vslidedown : RISCVTernaryAAAX;
962
963  defm vslide1up : RISCVBinaryAAX;
964  defm vslide1down : RISCVBinaryAAX;
965  defm vfslide1up : RISCVBinaryAAX;
966  defm vfslide1down : RISCVBinaryAAX;
967
968  defm vrgather : RISCVBinaryAAX;
969  defm vrgatherei16 : RISCVBinaryAAX;
970
971  def "int_riscv_vcompress" : RISCVBinaryAAAMask;
972
973  defm vaaddu : RISCVSaturatingBinaryAAX;
974  defm vaadd : RISCVSaturatingBinaryAAX;
975  defm vasubu : RISCVSaturatingBinaryAAX;
976  defm vasub : RISCVSaturatingBinaryAAX;
977
978  defm vsmul : RISCVSaturatingBinaryAAX;
979
980  defm vssrl : RISCVSaturatingBinaryAAX;
981  defm vssra : RISCVSaturatingBinaryAAX;
982
983  defm vnclipu : RISCVSaturatingBinaryABX;
984  defm vnclip : RISCVSaturatingBinaryABX;
985
986  defm vmfeq : RISCVCompare;
987  defm vmfne : RISCVCompare;
988  defm vmflt : RISCVCompare;
989  defm vmfle : RISCVCompare;
990  defm vmfgt : RISCVCompare;
991  defm vmfge : RISCVCompare;
992
993  defm vredsum : RISCVReduction;
994  defm vredand : RISCVReduction;
995  defm vredor : RISCVReduction;
996  defm vredxor : RISCVReduction;
997  defm vredminu : RISCVReduction;
998  defm vredmin : RISCVReduction;
999  defm vredmaxu : RISCVReduction;
1000  defm vredmax : RISCVReduction;
1001
1002  defm vwredsumu : RISCVReduction;
1003  defm vwredsum : RISCVReduction;
1004
1005  defm vfredosum : RISCVReduction;
1006  defm vfredsum : RISCVReduction;
1007  defm vfredmin : RISCVReduction;
1008  defm vfredmax : RISCVReduction;
1009
1010  defm vfwredsum : RISCVReduction;
1011  defm vfwredosum : RISCVReduction;
1012
1013  def int_riscv_vmand: RISCVBinaryAAANoMask;
1014  def int_riscv_vmnand: RISCVBinaryAAANoMask;
1015  def int_riscv_vmandnot: RISCVBinaryAAANoMask;
1016  def int_riscv_vmxor: RISCVBinaryAAANoMask;
1017  def int_riscv_vmor: RISCVBinaryAAANoMask;
1018  def int_riscv_vmnor: RISCVBinaryAAANoMask;
1019  def int_riscv_vmornot: RISCVBinaryAAANoMask;
1020  def int_riscv_vmxnor: RISCVBinaryAAANoMask;
1021  def int_riscv_vmclr : RISCVNullaryIntrinsic;
1022  def int_riscv_vmset : RISCVNullaryIntrinsic;
1023
1024  defm vpopc : RISCVMaskUnarySOut;
1025  defm vfirst : RISCVMaskUnarySOut;
1026  defm vmsbf : RISCVMaskUnaryMOut;
1027  defm vmsof : RISCVMaskUnaryMOut;
1028  defm vmsif : RISCVMaskUnaryMOut;
1029
1030  defm vfcvt_xu_f_v : RISCVConversion;
1031  defm vfcvt_x_f_v : RISCVConversion;
1032  defm vfcvt_rtz_xu_f_v : RISCVConversion;
1033  defm vfcvt_rtz_x_f_v : RISCVConversion;
1034  defm vfcvt_f_xu_v : RISCVConversion;
1035  defm vfcvt_f_x_v : RISCVConversion;
1036
1037  defm vfwcvt_f_xu_v : RISCVConversion;
1038  defm vfwcvt_f_x_v : RISCVConversion;
1039  defm vfwcvt_xu_f_v : RISCVConversion;
1040  defm vfwcvt_x_f_v : RISCVConversion;
1041  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
1042  defm vfwcvt_rtz_x_f_v : RISCVConversion;
1043  defm vfwcvt_f_f_v : RISCVConversion;
1044
1045  defm vfncvt_f_xu_w : RISCVConversion;
1046  defm vfncvt_f_x_w : RISCVConversion;
1047  defm vfncvt_xu_f_w : RISCVConversion;
1048  defm vfncvt_x_f_w : RISCVConversion;
1049  defm vfncvt_rtz_xu_f_w : RISCVConversion;
1050  defm vfncvt_rtz_x_f_w : RISCVConversion;
1051  defm vfncvt_f_f_w : RISCVConversion;
1052  defm vfncvt_rod_f_f_w : RISCVConversion;
1053
1054  // Output: (vector)
1055  // Input: (mask type input, vl)
1056  def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
1057                                  [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1058                                   llvm_anyint_ty],
1059                                  [IntrNoMem]>, RISCVVIntrinsic;
1060  // Output: (vector)
1061  // Input: (maskedoff, mask type vector_in, mask, vl)
1062  def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
1063                                       [LLVMMatchType<0>,
1064                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1065                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1066                                        llvm_anyint_ty],
1067                                       [IntrNoMem]>, RISCVVIntrinsic;
1068  // Output: (vector)
1069  // Input: (vl)
1070  def int_riscv_vid : RISCVNullaryIntrinsic;
1071
1072  // Output: (vector)
1073  // Input: (maskedoff, mask, vl)
1074  def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
1075                                     [LLVMMatchType<0>,
1076                                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1077                                      llvm_anyint_ty],
1078                                     [IntrNoMem]>, RISCVVIntrinsic;
1079
1080  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
1081    defm vlseg # nf : RISCVUSSegLoad<nf>;
1082    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
1083    defm vlsseg # nf : RISCVSSegLoad<nf>;
1084    defm vloxseg # nf : RISCVISegLoad<nf>;
1085    defm vluxseg # nf : RISCVISegLoad<nf>;
1086    defm vsseg # nf : RISCVUSSegStore<nf>;
1087    defm vssseg # nf : RISCVSSegStore<nf>;
1088    defm vsoxseg # nf : RISCVISegStore<nf>;
1089    defm vsuxseg # nf : RISCVISegStore<nf>;
1090  }
1091
1092} // TargetPrefix = "riscv"
1093