//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.
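//
// For example (illustrative only; the exact pointer-type suffix depends on
// the address space and pointee type), the i32 add variant specialised for a
// plain i32* pointer would be referenced in IR roughly as:
//   %res = call i32 @llvm.riscv.masked.atomicrmw.add.i32.p0i32(
//              i32* %ptr, i32 %incr, i32 %mask, i32 %ordering)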

let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }

  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(...)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra operand to do sign extension with.
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
  // Unsigned min and max don't need the extra operand.
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Bitmanip (Bit Manipulation) Extension

let TargetPrefix = "riscv" in {

  class BitManipGPRIntrinsics
      : Intrinsic<[llvm_any_ty],
                  [LLVMMatchType<0>],
                  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
  class BitManipGPRGPRIntrinsics
      : Intrinsic<[llvm_any_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>],
                  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

  // Zbb
  def int_riscv_orc_b : BitManipGPRIntrinsics;

  // Zbc
  def int_riscv_clmul  : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulh : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulr : BitManipGPRGPRIntrinsics;
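
  // As a rough sketch (the ".i64" suffix is an assumption following the usual
  // overloaded-name mangling for the llvm_any_ty result), a carry-less
  // multiply on RV64 would appear in IR as:
  //   %r = call i64 @llvm.riscv.clmul.i64(i64 %a, i64 %b)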

  // Zbe
  def int_riscv_bcompress   : BitManipGPRGPRIntrinsics;
  def int_riscv_bdecompress : BitManipGPRGPRIntrinsics;

  // Zbp
  def int_riscv_grev   : BitManipGPRGPRIntrinsics;
  def int_riscv_gorc   : BitManipGPRGPRIntrinsics;
  def int_riscv_shfl   : BitManipGPRGPRIntrinsics;
  def int_riscv_unshfl : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_n : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_b : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_h : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_w : BitManipGPRGPRIntrinsics;

  // Zbr
  def int_riscv_crc32_b : BitManipGPRIntrinsics;
  def int_riscv_crc32_h : BitManipGPRIntrinsics;
  def int_riscv_crc32_w : BitManipGPRIntrinsics;
  def int_riscv_crc32_d : BitManipGPRIntrinsics;
  def int_riscv_crc32c_b : BitManipGPRIntrinsics;
  def int_riscv_crc32c_h : BitManipGPRIntrinsics;
  def int_riscv_crc32c_w : BitManipGPRIntrinsics;
  def int_riscv_crc32c_d : BitManipGPRIntrinsics;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vectors

class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so that operand has to be extended. SplatOperand gives the index
  // of the operand that must be extended; if it is zero, the intrinsic has no
  // operand that must be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> SplatOperand = 0;
}

let TargetPrefix = "riscv" in {
  // We use anyint here, but only XLen is supported.
  def int_riscv_vsetvli   : Intrinsic<[llvm_anyint_ty],
                           /* AVL */  [LLVMMatchType<0>,
                           /* VSEW */  LLVMMatchType<0>,
                           /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem, IntrHasSideEffects,
                                       ImmArg<ArgIndex<1>>,
                                       ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                            /* VSEW */ [LLVMMatchType<0>,
                            /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem, IntrHasSideEffects,
                                       ImmArg<ArgIndex<0>>,
                                       ImmArg<ArgIndex<1>>]>;
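
  // A hedged usage sketch (the VSEW/VLMUL encodings come from the V extension
  // spec, not from this file): on RV64, requesting vl for SEW=32, LMUL=1
  // would look roughly like:
  //   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 0)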

  // For unit stride load
  // Input: (pointer, vl)
  class RISCVUSLoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
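  // For instance, using the vle instance defined later in this file, a
  // unit-stride load of <vscale x 2 x i32> with an i64 VL would plausibly be
  // mangled as (an assumption based on the overloaded result and VL types):
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(
  //            <vscale x 2 x i32>* %ptr, i64 %vl)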
  // For unit stride fault-only-first load
  // Input: (pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
  // not work.
  class RISCVUSLoadFF
        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>]>,
                    RISCVVIntrinsic;
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl)
  class RISCVUSLoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
  // not work.
  class RISCVUSLoadFFMask
        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>]>, RISCVVIntrinsic;
  // For strided load
  // Input: (pointer, stride, vl)
  class RISCVSLoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl)
  class RISCVSLoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed load
  // Input: (pointer, index, vl)
  class RISCVILoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl)
  class RISCVILoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the source vector.
  // Input: (vector_in, vl)
  class RISCVUnaryAANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first source vector (with
  // mask).
  // Input: (vector_in, mask, vl)
  class RISCVUnaryAAMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, int_vector_in, vl)
  class RISCVRGatherVVNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, int_vector_in, vl)
  class RISCVRGatherVVMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // Input: (vector_in, int16_vector_in, vl)
  class RISCVRGatherEI16VVNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, int16_vector_in, vl)
  class RISCVRGatherEI16VVMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first source vector, and
  // the second operand is XLen.
  // Input: (vector_in, xlen_in, vl)
  class RISCVGatherVXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first source vector (with
  // mask). The second operand is XLen.
  // Input: (maskedoff, vector_in, xlen_in, mask, vl)
  class RISCVGatherVXMask
       : Intrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                   [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
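  // To illustrate how SplatOperand counts (an inference from this file's
  // unmasked/masked pairs, which use 2 and 3 respectively for the same
  // scalar slot): if the intrinsic node's ID is counted as operand 0, then
  // operand 2 of the unmasked form above is the vector_in/scalar_in slot,
  // e.g. the i32 %b in a hypothetical
  //   %r = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(
  //            <vscale x 2 x i32> %a, i32 %b, i64 %vl)
  // which may need extension to XLen before being splat across the vector.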
  // The destination vector type is the same as the first source vector (with
  // mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryAAXMask
       : Intrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                   [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 3;
  }
  // The destination vector type is the same as the first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first source vector (with
  // mask). The second source operand must match the destination type or be an
  // XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryAAShiftMask
       : Intrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                   [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is NOT the same as the first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // The destination vector type is NOT the same as the first source vector
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryABXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 3;
  }
  // The destination vector type is NOT the same as the first source vector.
  // The second source operand must match the destination type or be an XLen
  // scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is NOT the same as the first source vector
  // (with mask). The second source operand must match the destination type or
  // be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryABShiftMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For binary operations with V0 as input.
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryWithV0
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For binary operations with mask type output without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareNoMask
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For binary operations with mask type output with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVCompareMask
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 3;
  }
  // For FP classify operations.
  // Output: (bit mask type output)
  // Input: (vector_in, vl)
  class RISCVClassifyNoMask
        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVClassifyMask
        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For saturating binary operations.
  // The destination vector type is the same as the first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For saturating binary operations with mask.
  // The destination vector type is the same as the first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVSaturatingBinaryAAXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let SplatOperand = 3;
  }
  // For saturating binary operations.
  // The destination vector type is the same as the first source vector.
  // The second source operand matches the destination type or is an XLen
  // scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
  // For saturating binary operations with mask.
  // The destination vector type is the same as the first source vector.
  // The second source operand matches the destination type or is an XLen
  // scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVSaturatingBinaryAAShiftMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
  // For saturating binary operations.
  // The destination vector type is NOT the same as the first source vector.
  // The second source operand matches the destination type or is an XLen
  // scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryABShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
  // For saturating binary operations with mask.
  // The destination vector type is NOT the same as the first source vector.
  // The second source operand matches the destination type or is an XLen
  // scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVSaturatingBinaryABShiftMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
  class RISCVTernaryAAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic;
  class RISCVTernaryAAAXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic;
  class RISCVTernaryAAXANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  class RISCVTernaryAAXAMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  class RISCVTernaryWideNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  class RISCVTernaryWideMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask type comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
  class RISCVReductionMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  class RISCVMaskUnarySOutNoMask
        : Intrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  class RISCVMaskUnarySOutMask
        : Intrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is NOT the same as the source vector.
  // Input: (vector_in, vl)
  class RISCVUnaryABNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is NOT the same as the source vector (with
  // mask).
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVUnaryABMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  class RISCVMaskUnaryMOutMask
        : Intrinsic<[llvm_anyint_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (vl)
  class RISCVNullaryIntrinsic
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For conversion unary operations.
  // Input: (vector_in, vl)
  class RISCVConversionNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVConversionMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For atomic operations without mask
  // Input: (base, index, value, vl)
  class RISCVAMONoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For atomic operations with mask
  // Input: (base, index, value, mask, vl)
  class RISCVAMOMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;

  // For unit stride segment load
  // Input: (pointer, vl)
  class RISCVUSSegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl)
  class RISCVUSSegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;

  // For unit stride fault-only-first segment load
  // Input: (pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
  // not work.
  class RISCVUSSegLoadFF<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    [LLVMPointerToElt<0>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Combining IntrReadMem with IntrHasSideEffects does
  // not work.
  class RISCVUSSegLoadFFMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                     [LLVMPointerToElt<0>,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic;

  // For strided segment load
  // Input: (pointer, offset, vl)
  class RISCVSSegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For strided segment load with mask
  // Input: (maskedoff, pointer, offset, mask, vl)
  class RISCVSSegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;

  // For indexed segment load
  // Input: (pointer, index, vl)
  class RISCVISegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl)
  class RISCVISegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;

  // For unit stride segment store
  // Input: (value, pointer, vl)
  class RISCVUSSegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl)
  class RISCVUSSegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;

  // For strided segment store
  // Input: (value, pointer, offset, vl)
  class RISCVSSegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided segment store with mask
  // Input: (value, pointer, offset, mask, vl)
  class RISCVSSegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;

  // For indexed segment store
  // Input: (value, pointer, index, vl)
  class RISCVISegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl)
  class RISCVISegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;

  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
  }
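  // By TableGen's NAME substitution, each "defm <op> : <multiclass>;" later
  // in this file expands to an unmasked/masked pair of records; for example,
  // "defm vle : RISCVUSLoad;" yields:
  //   def int_riscv_vle      : RISCVUSLoad;
  //   def int_riscv_vle_mask : RISCVUSLoadMask;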
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMask;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
  }

  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAANoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
  }
  // AAX means the destination type (A) is the same as the first source
  // type (A). X means any type for the second source operand.
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
  }
  // Like RISCVBinaryAAX, but the second operand is used as a shift amount, so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryAAShift {
    def "int_riscv_" # NAME : RISCVBinaryAAShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMask;
  }
  multiclass RISCVRGatherVV {
    def "int_riscv_" # NAME : RISCVRGatherVVNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMask;
  }
  multiclass RISCVRGatherVX {
    def "int_riscv_" # NAME : RISCVGatherVXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMask;
  }
  multiclass RISCVRGatherEI16VV {
    def "int_riscv_" # NAME : RISCVRGatherEI16VVNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMask;
  }
  // ABX means the destination type (A) is different from the first source
  // type (B). X means any type for the second source operand.
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
  }
  // Like RISCVBinaryABX, but the second operand is used as a shift amount, so
  // it must be a vector or an XLen scalar.
  multiclass RISCVBinaryABShift {
    def "int_riscv_" # NAME : RISCVBinaryABShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMask;
  }
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
  }
  multiclass RISCVSaturatingBinaryAAShift {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMask;
  }
  multiclass RISCVSaturatingBinaryABShift {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMask;
  }
  multiclass RISCVTernaryAAAX {
    def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
  }
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
  }
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
  }
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
  }
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
  }
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
  }
  multiclass RISCVMaskUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
  }
  multiclass RISCVMaskUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
  }
  multiclass RISCVConversion {
    def "int_riscv_" # NAME : RISCVConversionNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
  }
  multiclass RISCVAMO {
    def "int_riscv_" # NAME : RISCVAMONoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVAMOMask;
  }
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
  }
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMask<nf>;
  }
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
  }
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMask<nf>;
  }
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
  }
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
  }
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask<nf>;
  }

  defm vle : RISCVUSLoad;
  defm vleff : RISCVUSLoadFF;
  defm vse : RISCVUSStore;
  defm vlse : RISCVSLoad;
  defm vsse : RISCVSStore;
  defm vluxei : RISCVILoad;
  defm vloxei : RISCVILoad;
  defm vsoxei : RISCVIStore;
  defm vsuxei : RISCVIStore;

  def int_riscv_vle1 : RISCVUSLoad;
  def int_riscv_vse1 : RISCVUSStore;

  defm vamoswap : RISCVAMO;
  defm vamoadd : RISCVAMO;
  defm vamoxor : RISCVAMO;
  defm vamoand : RISCVAMO;
  defm vamoor : RISCVAMO;
  defm vamomin : RISCVAMO;
  defm vamomax : RISCVAMO;
  defm vamominu : RISCVAMO;
  defm vamomaxu : RISCVAMO;

  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;

  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;

  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;

  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;

  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  defm vsll : RISCVBinaryAAShift;
  defm vsrl : RISCVBinaryAAShift;
  defm vsra : RISCVBinaryAAShift;

  defm vnsrl : RISCVBinaryABShift;
  defm vnsra : RISCVBinaryABShift;

  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;
  defm vmsgeu : RISCVCompare;
  defm vmsge : RISCVCompare;
  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  defm vwmaccu  : RISCVTernaryWide;
  defm vwmacc   : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;

  defm vfadd : RISCVBinaryAAX;
  defm vfsub : RISCVBinaryAAX;
  defm vfrsub : RISCVBinaryAAX;

  defm vfwadd : RISCVBinaryABX;
  defm vfwsub : RISCVBinaryABX;
  defm vfwadd_w : RISCVBinaryAAX;
  defm vfwsub_w : RISCVBinaryAAX;

  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  def int_riscv_vmerge : RISCVBinaryWithV0;

  def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
                                    [LLVMMatchType<0>, llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMVectorElementType<0>, llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMVectorElementType<0>, llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;

  def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
                                    [llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                     llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;

  def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
                                     [llvm_anyfloat_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
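
  // A hedged sketch of the scalar-move intrinsics above (the exact overloaded
  // suffix is an assumption based on the vector type):
  //   %x = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> %v)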

  defm vfmul : RISCVBinaryAAX;
  defm vfdiv : RISCVBinaryAAX;
  defm vfrdiv : RISCVBinaryAAX;

  defm vfwmul : RISCVBinaryABX;

  defm vfmacc : RISCVTernaryAAXA;
  defm vfnmacc : RISCVTernaryAAXA;
  defm vfmsac : RISCVTernaryAAXA;
  defm vfnmsac : RISCVTernaryAAXA;
  defm vfmadd : RISCVTernaryAAXA;
  defm vfnmadd : RISCVTernaryAAXA;
  defm vfmsub : RISCVTernaryAAXA;
  defm vfnmsub : RISCVTernaryAAXA;

  defm vfwmacc : RISCVTernaryWide;
  defm vfwnmacc : RISCVTernaryWide;
  defm vfwmsac : RISCVTernaryWide;
  defm vfwnmsac : RISCVTernaryWide;

  defm vfsqrt : RISCVUnaryAA;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAA;

  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  defm vfclass : RISCVClassify;

  defm vfmerge : RISCVBinaryWithV0;

  defm vslideup : RISCVTernaryAAAX;
  defm vslidedown : RISCVTernaryAAAX;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  defm vrgather_vv : RISCVRGatherVV;
  defm vrgather_vx : RISCVRGatherVX;
  defm vrgatherei16_vv : RISCVRGatherEI16VV;

  def "int_riscv_vcompress" : RISCVUnaryAAMask;

  defm vaaddu : RISCVSaturatingBinaryAAX;
  defm vaadd : RISCVSaturatingBinaryAAX;
  defm vasubu : RISCVSaturatingBinaryAAX;
  defm vasub : RISCVSaturatingBinaryAAX;

  defm vsmul : RISCVSaturatingBinaryAAX;

  defm vssrl : RISCVSaturatingBinaryAAShift;
  defm vssra : RISCVSaturatingBinaryAAShift;

  defm vnclipu : RISCVSaturatingBinaryABShift;
  defm vnclip : RISCVSaturatingBinaryABShift;

  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;

  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  defm vfredosum : RISCVReduction;
  defm vfredsum : RISCVReduction;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  defm vfwredsum : RISCVReduction;
  defm vfwredosum : RISCVReduction;
  def int_riscv_vmand : RISCVBinaryAAANoMask;
  def int_riscv_vmnand : RISCVBinaryAAANoMask;
  def int_riscv_vmandnot : RISCVBinaryAAANoMask;
  def int_riscv_vmxor : RISCVBinaryAAANoMask;
  def int_riscv_vmor : RISCVBinaryAAANoMask;
  def int_riscv_vmnor : RISCVBinaryAAANoMask;
  def int_riscv_vmornot : RISCVBinaryAAANoMask;
  def int_riscv_vmxnor : RISCVBinaryAAANoMask;
  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;

  defm vpopc : RISCVMaskUnarySOut;
  defm vfirst : RISCVMaskUnarySOut;
  defm vmsbf : RISCVMaskUnaryMOut;
  defm vmsof : RISCVMaskUnaryMOut;
  defm vmsif : RISCVMaskUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversion;
  defm vfcvt_x_f_v : RISCVConversion;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversion;
  defm vfcvt_f_x_v : RISCVConversion;

  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversion;
  defm vfwcvt_x_f_v : RISCVConversion;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;

  defm vfncvt_f_xu_w : RISCVConversion;
  defm vfncvt_f_x_w : RISCVConversion;
  defm vfncvt_xu_f_w : RISCVConversion;
  defm vfncvt_x_f_w : RISCVConversion;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversion;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (mask type input, vl)
  def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
                                  [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                   llvm_anyint_ty],
                                  [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl)
  def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
                                       [LLVMMatchType<0>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        llvm_anyint_ty],
                                       [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (vl)
  def int_riscv_vid : RISCVNullaryIntrinsic;

  // Output: (vector)
  // Input: (maskedoff, mask, vl)
  def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
                                     [LLVMMatchType<0>,
                                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;

  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }
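  // For nf = 2, for instance, the loop above produces int_riscv_vlseg2,
  // int_riscv_vlseg2_mask, int_riscv_vlseg2ff, int_riscv_vlseg2ff_mask,
  // int_riscv_vlsseg2, and so on, through nf = 8.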

} // TargetPrefix = "riscv"