1//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines all of the RISCV-specific intrinsics.
10//
11//===----------------------------------------------------------------------===//
12
13//===----------------------------------------------------------------------===//
14// Atomics
15
16// Atomic Intrinsics have multiple versions for different access widths, which
17// all follow one of the following signatures (depending on how many arguments
18// they require). We carefully instantiate only specific versions of these for
19// specific integer widths, rather than using `llvm_anyint_ty`.
20//
21// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
22// canonical names, and the intrinsics used in the code will have a name
23// suffixed with the pointer type they are specialised for (denoted `<p>` in the
24// names below), in order to avoid type conflicts.
25
let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }

  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // These intrinsics are intended only for internal compiler use (i.e. as
  // part of the AtomicExpand pass via the emitMaskedAtomic*Intrinsic hooks).
  // Their names and semantics could change in the future.

  // The final immediate operand is the atomic memory ordering.
  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra operand to do sign extension with.
  // @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(
  //   ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;

} // TargetPrefix = "riscv"
76
77//===----------------------------------------------------------------------===//
78// Bitmanip (Bit Manipulation) Extension
79
let TargetPrefix = "riscv" in {

  // Unary bit-manipulation operation: one operand of the same (any) type as
  // the result. No memory access, safe to speculate.
  class BitManipGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;
  // Binary bit-manipulation operation: two operands of the same (any) type as
  // the result. No memory access, safe to speculate.
  class BitManipGPRGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;

  // Zbb
  def int_riscv_orc_b : BitManipGPRIntrinsics;

  // Zbc or Zbkc
  def int_riscv_clmul  : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulh : BitManipGPRGPRIntrinsics;

  // Zbc
  def int_riscv_clmulr : BitManipGPRGPRIntrinsics;

  // Zbkb
  def int_riscv_brev8 : BitManipGPRIntrinsics;
  def int_riscv_zip   : BitManipGPRIntrinsics;
  def int_riscv_unzip : BitManipGPRIntrinsics;

  // Zbkx
  def int_riscv_xperm4  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm8  : BitManipGPRGPRIntrinsics;
} // TargetPrefix = "riscv"
110
111//===----------------------------------------------------------------------===//
112// Vectors
113
// The intrinsic does not have any operand that must be extended.
// (Sentinel value: the maximum encodable in RISCVVIntrinsic::ScalarOperand.)
defvar NoScalarOperand = 0xF;

// The intrinsic does not have a VL operand.
// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
// (Sentinel value: the maximum encodable in RISCVVIntrinsic::VLOperand.)
defvar NoVLOperand = 0x1F;

// Mixin carrying per-intrinsic metadata for RISC-V vector intrinsics: which
// operand (if any) is a scalar that needs extension, and which operand is VL.
class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  // Index of the scalar operand needing extension, or NoScalarOperand.
  bits<4> ScalarOperand = NoScalarOperand;
  // Index of the VL (vector length) operand, or NoVLOperand.
  bits<5> VLOperand = NoVLOperand;
}
128
let TargetPrefix = "riscv" in {
  // We use anyint here but we only support XLen.
  // VSEW and VLMUL must be immediates (ImmArg); only AVL may be a variable.
  def int_riscv_vsetvli   : Intrinsic<[llvm_anyint_ty],
                           /* AVL */  [LLVMMatchType<0>,
                           /* VSEW */  LLVMMatchType<0>,
                           /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem,
                                       ImmArg<ArgIndex<1>>,
                                       ImmArg<ArgIndex<2>>]>;
  // Like vsetvli, but with no AVL operand; both operands are immediates.
  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                            /* VSEW */ [LLVMMatchType<0>,
                            /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem,
                                       ImmArg<ArgIndex<0>>,
                                       ImmArg<ArgIndex<1>>]>;
144
  // For unit stride mask load
  // Input: (pointer, vl)
  // (No passthru or mask operand; only reads memory.)
  class RISCVUSMLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unit stride load
  // Input: (passthru, pointer, vl)
  class RISCVUSLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride fault-only-first load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFF
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>]>,
                    RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // (policy must be an immediate; see ImmArg<ArgIndex<4>> below.)
  class RISCVUSLoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFFMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with passthru operand
  // Input: (passthru, pointer, stride, vl)
  // The mask operand of the masked variants below is an i1 vector with the
  // same element count as the result (LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>).
  class RISCVSLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl, policy)
  // (policy must be an immediate; see ImmArg<ArgIndex<5>> below.)
  class RISCVSLoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                     LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed load with passthru operand
  // Input: (passthru, pointer, index, vl)
  // (index is a separate anyvector, so its element type/width may differ
  // from the result's.)
  class RISCVILoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  class RISCVILoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  // Store intrinsics return nothing ([]) and only write memory (IntrWriteMem).
  class RISCVUSStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     llvm_anyint_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For destination vector type is the same as source vector.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryAAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For destination vector type is the same as first source vector (with mask).
  // Input: (vector_in, vector_in, mask, vl, policy)
  // (policy must be an immediate; see ImmArg<ArgIndex<4>> below.)
  class RISCVUnaryAAMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as source vector.
  // Input: (passthru, vector_in, frm, vl)
  // (frm, the rounding-mode operand, must be an immediate.)
  class RISCVUnaryAAUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first source vector (with mask).
  // Input: (vector_in, vector_in, mask, frm, vl, policy)
  class RISCVUnaryAAMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, mask, vl)
  // (The class only has four operands: passthru = 0, vector_in = 1,
  // mask = 2, vl = 3.)
  class RISCVCompress
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first and second source vector.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For destination vector type is the same as first and second source vector.
  // Input: (passthru, vector_in, int_vector_in, vl)
  // (The index vector has the same shape as the data, bitcast to integer.)
  class RISCVRGatherVVUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first and second source vector.
  // Input: (vector_in, vector_in, int_vector_in, vl, policy)
  class RISCVRGatherVVMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, int16_vector_in, vl)
  // (The index vector always has i16 elements, same element count as data.)
  class RISCVRGatherEI16VVUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first and second source vector.
  // Input: (vector_in, vector_in, int16_vector_in, vl, policy)
  class RISCVRGatherEI16VVMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For destination vector type is the same as first source vector, and the
  // second operand is XLen.
  // Input: (passthru, vector_in, xlen_in, vl)
  class RISCVGatherVXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first source vector (with mask).
  // Second operand is XLen.
  // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
  class RISCVGatherVXMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                    LLVMMatchType<1>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For destination vector type is the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For destination vector type is the same as first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAXMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For destination vector type is the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
  // (frm, the rounding-mode operand, must be an immediate.)
  class RISCVBinaryAAXUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For destination vector type is the same as first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
  class RISCVBinaryAAXMaskedRoundingMode
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>, LLVMMatchType<2>],
                   [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
  // For destination vector type is the same as first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  // NOTE(review): unlike RISCVBinaryAAXUnMasked, ScalarOperand is left unset
  // here — presumably because a shift amount needs no sign extension; confirm.
  class RISCVBinaryAAShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first source vector (with mask).
  // The second source operand must match the destination type or be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAShiftMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For destination vector type is NOT the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For destination vector type is NOT the same as first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  // (policy must be an immediate; see ImmArg<ArgIndex<5>> below.)
  class RISCVBinaryABXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For destination vector type is NOT the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
  // (frm, the rounding-mode operand, must be an immediate.)
  class RISCVBinaryABXUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For destination vector type is NOT the same as first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
  class RISCVBinaryABXMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
  // For destination vector type is NOT the same as first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is NOT the same as first source vector (with mask).
  // The second source operand must match the destination type or be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For binary operations with V0 as input.
  // Input: (passthru, vector_in, vector_in/scalar_in, V0, vl)
  // (V0 is modeled as an i1 mask vector operand at index 3.)
  class RISCVBinaryWithV0
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
        :DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                   [llvm_anyvector_ty, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                    llvm_anyint_ty],
                   [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareUnMasked
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  // (maskedoff and mask are both i1 vectors of the same element count.)
  class RISCVCompareMasked
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
584  // For FP classify operations.
585  // Output: (bit mask type output)
586  // Input: (passthru, vector_in, vl)
587  class RISCVClassifyUnMasked
588        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
589                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
590                      llvm_anyint_ty],
591                    [IntrNoMem]>, RISCVVIntrinsic {
592    let VLOperand = 1;
593  }
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl, policy)
  // (The result is an integer vector of the same shape as the FP source,
  // via LLVMVectorOfBitcastsToInt; policy must be an immediate.)
  class RISCVClassifyMasked
        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  // NOTE(review): IntrHasSideEffects here presumably models the saturation
  // flag (vxsat CSR) write — confirm.
  class RISCVSaturatingBinaryAAXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For Saturating binary operations with rounding-mode operand
  // The destination vector type is the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  // NOTE(review): this uses plain Intrinsic while the neighboring classes use
  // DefaultAttrsIntrinsic — confirm this difference is intentional.
  class RISCVSaturatingBinaryAAXUnMaskedRoundingMode
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryAAXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask and rounding-mode operand
  // The destination vector type is the same as first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryAAXMaskedRoundingMode
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  // vl is operand 3.
  class RISCVSaturatingBinaryAAShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  // The policy operand (index 5) must be an immediate; vl is operand 4.
  class RISCVSaturatingBinaryAAShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  // The rounding-mode operand vxrm (index 3) must be an immediate; vl is
  // operand 4.
  class RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
686  // For Saturating binary operations with mask.
687  // The destination vector type is the same as first source vector.
688  // The second source operand matches the destination type or is an XLen scalar.
689  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
690  class RISCVSaturatingBinaryAAShiftMaskedRoundingMode
691        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
692                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
693                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
694                     LLVMMatchType<2>, LLVMMatchType<2>],
695                    [ImmArg<ArgIndex<4>>,ImmArg<ArgIndex<6>>, IntrNoMem, IntrHasSideEffects]>,
696                    RISCVVIntrinsic {
697    let VLOperand = 6;
698  }
  // For Saturating binary operations.
  // The destination vector type is NOT the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  // vl is operand 3.
  class RISCVSaturatingBinaryABShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is NOT the same as first source vector (with mask).
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  // The policy operand (index 5, same type as vl) must be an immediate;
  // vl is operand 4.
  class RISCVSaturatingBinaryABShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations.
  // The destination vector type is NOT the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  // vxrm (operand 3) must be an immediate; vl is operand 4.
  class RISCVSaturatingBinaryABShiftUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is NOT the same as first source vector (with mask).
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  // vxrm (operand 4) and policy (operand 6) must be immediates; vl is
  // operand 5.
  class RISCVSaturatingBinaryABShiftMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem,
                     IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // For slide operations (unmasked).
  // Input: (vector_in, vector_in, scalar_in, vl, policy)
  // The slide amount (operand 2) is an XLen scalar; the policy operand
  // (index 4) must be an immediate; vl is operand 3.
  class RVVSlideUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For slide operations (masked).
  // Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy)
  // The policy operand (index 5) must be an immediate; vl is operand 4.
  class RVVSlideMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // UnMasked Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  // The policy operand (index 4) must be an immediate; vl is operand 3.
  class RISCVTernaryAAXAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    // The scalar form of the multiplier is operand 1.
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  // The policy operand (index 5) must be an immediate; vl is operand 4.
  class RISCVTernaryAAXAMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // UnMasked Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  // frm (operand 3) and policy (operand 5) must be immediates; vl is
  // operand 4.
  class RISCVTernaryAAXAUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  // frm (operand 4) and policy (operand 6) must be immediates; vl is
  // operand 5.
  class RISCVTernaryAAXAMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
  // UnMasked Widening Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  // The policy operand (index 4) must be an immediate; vl is operand 3.
  class RISCVTernaryWideUnMasked
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      llvm_anyint_ty, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<4>>, IntrNoMem] >, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked Widening Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  // The policy operand (index 5) must be an immediate; vl is operand 4.
  class RISCVTernaryWideMasked
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // UnMasked Widening Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  // frm (operand 3) and policy (operand 5) must be immediates; vl is
  // operand 4.
  class RISCVTernaryWideUnMaskedRoundingMode
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem] >,
                     RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked Widening Vector Multiply-Add operations, its first operand can not be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  // frm (operand 4) and policy (operand 6) must be immediates; vl is
  // operand 5.
  class RISCVTernaryWideMaskedRoundingMode
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                     RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
  // For Reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, vl)
  // vl is operand 3.
  class RISCVReductionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask width comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, mask, vl)
  // vl is operand 4.
  class RISCVReductionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Reduction ternary operations.
  // The destination vector type is the same as the first and third source
  // vectors.
  // Input: (vector_in, vector_in, vector_in, frm, vl)
  // frm (operand 3) must be an immediate; vl is operand 4.
  class RISCVReductionUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source
  // vectors. The mask width comes from the second source vector.
  // Input: (vector_in, vector_in, vector_in, mask, frm, vl)
  // frm (operand 4) must be an immediate; vl is operand 5.
  class RISCVReductionMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  // The result has the same type as vl (type parameter 1); vl is operand 1.
  class RISCVMaskedUnarySOutUnMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  // The mask (operand 1) has the same type as the source vector; vl is
  // operand 2.
  class RISCVMaskedUnarySOutMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For destination vector type is NOT the same as source vector.
  // Input: (passthru, vector_in, vl)
  // vl is operand 2.
  class RISCVUnaryABUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For destination vector type is NOT the same as source vector (with mask).
  // Input: (maskedoff, vector_in, mask, vl, policy)
  // The mask width comes from the source vector (type parameter 1); the
  // policy operand (index 4) must be an immediate; vl is operand 3.
  class RISCVUnaryABMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  // vl is operand 1.
  class RISCVUnaryUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  // maskedoff, source and mask all share the result type; vl is operand 3.
  class RISCVMaskedUnaryMOutMasked
        : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
950  // Output: (vector)
951  // Input: (vl)
952  class RISCVNullaryIntrinsic
953        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
954                    [llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic {
955    let VLOperand = 1;
956  }
  // Output: (vector)
  // Input: (passthru, vl)
  // vl is operand 1.
  class RISCVID
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For Conversion unary operations.
  // Input: (passthru, vector_in, vl)
  // vl is operand 2.
  class RISCVConversionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl, policy)
  // The policy operand (index 4) must be an immediate; vl is operand 3.
  class RISCVConversionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Conversion unary operations.
  // Input: (passthru, vector_in, frm, vl)
  // frm (operand 2) must be an immediate; vl is operand 3.
  class RISCVConversionUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, frm, vl, policy)
  // frm (operand 3) and policy (operand 5) must be immediates; vl is
  // operand 4.
  class RISCVConversionMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
1002
  // For unit stride segment load
  // Input: (passthru, pointer, vl)
  // Results are nf vectors; operands are nf passthru vectors (indices
  // 0..nf-1), the pointer at index nf, and vl at index nf+1.
  class RISCVUSSegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Operands: nf maskedoff vectors, pointer (nf), mask (nf+1), vl (nf+2),
  // policy immediate (nf+3).
  class RISCVUSSegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
1026
  // For unit stride fault-only-first segment load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  // Operands: nf passthru vectors, pointer (nf), vl (nf+1); the trailing
  // scalar result is the new vl.
  class RISCVUSSegLoadFF<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  // Operands: nf maskedoff vectors, pointer (nf), mask (nf+1), vl (nf+2),
  // policy immediate (nf+3).
  class RISCVUSSegLoadFFMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                     [llvm_ptr_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
1056
  // For stride segment load
  // Input: (passthru, pointer, offset, vl)
  // Operands: nf passthru vectors, pointer (nf), stride/offset (nf+1),
  // vl (nf+2).
  class RISCVSSegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, llvm_anyint_ty, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For stride segment load with mask
  // Input: (maskedoff, pointer, offset, mask, vl, policy)
  // Operands: nf maskedoff vectors, pointer (nf), offset (nf+1), mask (nf+2),
  // vl (nf+3), policy immediate (nf+4).
  class RISCVSSegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }
1081
  // For indexed segment load
  // Input: (passthru, pointer, index, vl)
  // Operands: nf passthru vectors, pointer (nf), index vector (nf+1),
  // vl (nf+2).
  class RISCVISegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, llvm_anyvector_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  // Operands: nf maskedoff vectors, pointer (nf), index vector (nf+1),
  // mask (nf+2), vl (nf+3), policy immediate (nf+4).
  class RISCVISegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<2>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }
1106
  // For unit stride segment store
  // Input: (value, pointer, vl)
  // Operands: nf value vectors (0..nf-1), pointer (nf), vl (nf+1).
  class RISCVUSSegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl)
  // Operands: nf value vectors, pointer (nf), mask (nf+1), vl (nf+2).
  class RISCVUSSegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
1129
  // For stride segment store
  // Input: (value, pointer, offset, vl)
  // Operands: nf value vectors, pointer (nf), offset (nf+1), vl (nf+2).
  class RISCVSSegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyint_ty,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For stride segment store with mask
  // Input: (value, pointer, offset, mask, vl)
  // Operands: nf value vectors, pointer (nf), offset (nf+1), mask (nf+2),
  // vl (nf+3).
  class RISCVSSegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }
1153
  // For indexed segment store
  // Input: (value, pointer, index, vl)
  // Operands: nf value vectors, pointer (nf), index vector (nf+1), vl (nf+2).
  class RISCVISegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyvector_ty,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl)
  // Operands: nf value vectors, pointer (nf), index vector (nf+1),
  // mask (nf+2), vl (nf+3).
  class RISCVISegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }
1177
1178  multiclass RISCVUSLoad {
1179    def "int_riscv_" # NAME : RISCVUSLoad;
1180    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked;
1181  }
1182  multiclass RISCVUSLoadFF {
1183    def "int_riscv_" # NAME : RISCVUSLoadFF;
1184    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked;
1185  }
1186  multiclass RISCVSLoad {
1187    def "int_riscv_" # NAME : RISCVSLoad;
1188    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked;
1189  }
1190  multiclass RISCVILoad {
1191    def "int_riscv_" # NAME : RISCVILoad;
1192    def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked;
1193  }
1194  multiclass RISCVUSStore {
1195    def "int_riscv_" # NAME : RISCVUSStore;
1196    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked;
1197  }
1198  multiclass RISCVSStore {
1199    def "int_riscv_" # NAME : RISCVSStore;
1200    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked;
1201  }
1202
1203  multiclass RISCVIStore {
1204    def "int_riscv_" # NAME : RISCVIStore;
1205    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked;
1206  }
1207  multiclass RISCVUnaryAA {
1208    def "int_riscv_" # NAME : RISCVUnaryAAUnMasked;
1209    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked;
1210  }
1211  multiclass RISCVUnaryAARoundingMode {
1212    def "int_riscv_" # NAME : RISCVUnaryAAUnMaskedRoundingMode;
1213    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMaskedRoundingMode;
1214  }
1215  multiclass RISCVUnaryAB {
1216    def "int_riscv_" # NAME : RISCVUnaryABUnMasked;
1217    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked;
1218  }
1219  // AAX means the destination type(A) is the same as the first source
1220  // type(A). X means any type for the second source operand.
1221  multiclass RISCVBinaryAAX {
1222    def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
1223    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
1224  }
1225  multiclass RISCVBinaryAAXRoundingMode {
1226    def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode;
1227    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode;
1228  }
1229  // Like RISCVBinaryAAX, but the second operand is used a shift amount so it
1230  // must be a vector or an XLen scalar.
1231  multiclass RISCVBinaryAAShift {
1232    def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked;
1233    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked;
1234  }
1235  multiclass RISCVRGatherVV {
1236    def "int_riscv_" # NAME : RISCVRGatherVVUnMasked;
1237    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked;
1238  }
1239  multiclass RISCVRGatherVX {
1240    def "int_riscv_" # NAME : RISCVGatherVXUnMasked;
1241    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked;
1242  }
1243  multiclass RISCVRGatherEI16VV {
1244    def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked;
1245    def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked;
1246  }
1247  // ABX means the destination type(A) is different from the first source
1248  // type(B). X means any type for the second source operand.
1249  multiclass RISCVBinaryABX {
1250    def "int_riscv_" # NAME : RISCVBinaryABXUnMasked;
1251    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked;
1252  }
1253  multiclass RISCVBinaryABXRoundingMode {
1254    def "int_riscv_" # NAME : RISCVBinaryABXUnMaskedRoundingMode;
1255    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMaskedRoundingMode;
1256  }
1257  // Like RISCVBinaryABX, but the second operand is used a shift amount so it
1258  // must be a vector or an XLen scalar.
1259  multiclass RISCVBinaryABShift {
1260    def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked;
1261    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked;
1262  }
1263  multiclass RISCVBinaryWithV0 {
1264    def "int_riscv_" # NAME : RISCVBinaryWithV0;
1265  }
1266  multiclass RISCVBinaryMaskOutWithV0 {
1267    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
1268  }
1269  multiclass RISCVBinaryMaskOut {
1270    def "int_riscv_" # NAME : RISCVBinaryMOut;
1271  }
  // Saturating binary op; unmasked plus "_mask" variants, as above.
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
  }
  // Saturating binary op with an extra rounding-mode operand.
  multiclass RISCVSaturatingBinaryAAXRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode;
  }
  // Saturating shift (second operand is a shift amount) with rounding mode;
  // destination type matches the first source type (AA).
  multiclass RISCVSaturatingBinaryAAShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMaskedRoundingMode;
  }
  // As above, but the destination type differs from the first source (AB).
  multiclass RISCVSaturatingBinaryABShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMaskedRoundingMode;
  }
  // Slide (vslideup/vslidedown) intrinsics, unmasked and masked.
  multiclass RVVSlide {
    def "int_riscv_" # NAME : RVVSlideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
  }
  // Ternary (multiply-accumulate style) op, unmasked and masked.
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked;
  }
  // Ternary op with an extra rounding-mode operand.
  multiclass RISCVTernaryAAXARoundingMode {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMaskedRoundingMode;
  }
  // Comparison producing a mask result, unmasked and masked.
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked;
  }
  // FP classify, unmasked and masked.
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked;
  }
  // Widening ternary op (destination wider than sources).
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMasked;
  }
  // Widening ternary op with an extra rounding-mode operand.
  multiclass RISCVTernaryWideRoundingMode {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMaskedRoundingMode;
  }
  // Reduction op, unmasked and masked.
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked;
  }
  // Reduction op with an extra rounding-mode operand.
  multiclass RISCVReductionRoundingMode {
    def "int_riscv_" # NAME : RISCVReductionUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMaskedRoundingMode;
  }
  // Unary op on a mask input producing a scalar (S) result.
  multiclass RISCVMaskedUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked;
  }
  // Unary op on a mask input producing a mask (M) result. Note the unmasked
  // form reuses the generic RISCVUnaryUnMasked class.
  multiclass RISCVMaskedUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked;
  }
1332  multiclass RISCVConversion {
1333    def "int_riscv_" #NAME :RISCVConversionUnMasked;
1334    def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked;
1335  }
1336  multiclass RISCVConversionRoundingMode {
1337    def "int_riscv_" #NAME :RISCVConversionUnMaskedRoundingMode;
1338    def "int_riscv_" # NAME # "_mask" : RISCVConversionMaskedRoundingMode;
1339  }
  // Segment loads/stores, parameterized by the number of fields <nf>.
  // US = unit-stride, S = strided, I = indexed; FF = fault-only-first.
  // Each defines an unmasked intrinsic plus a "_mask" variant.
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked<nf>;
  }
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked<nf>;
  }
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked<nf>;
  }
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked<nf>;
  }
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked<nf>;
  }
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked<nf>;
  }
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked<nf>;
  }
1368
1369  defm vle : RISCVUSLoad;
1370  defm vleff : RISCVUSLoadFF;
1371  defm vse : RISCVUSStore;
1372  defm vlse: RISCVSLoad;
1373  defm vsse: RISCVSStore;
1374  defm vluxei : RISCVILoad;
1375  defm vloxei : RISCVILoad;
1376  defm vsoxei : RISCVIStore;
1377  defm vsuxei : RISCVIStore;
1378
1379  def int_riscv_vlm : RISCVUSMLoad;
1380  def int_riscv_vsm : RISCVUSStore;
1381
  // Single-width integer add/subtract.
  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;

  // Widening integer add/subtract. The plain forms widen both sources (ABX);
  // the "_w" forms take a first source already of the destination type (AAX).
  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;

  // Integer zero/sign extension (destination type differs from source).
  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  // Add-with-carry and carry-out generation, using v0 as the carry input.
  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;

  // Subtract-with-borrow and borrow-out generation, using v0 as input.
  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;

  // Bitwise logical operations.
  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  // Single-width shifts (second operand is a shift amount).
  defm vsll : RISCVBinaryAAShift;
  defm vsrl : RISCVBinaryAAShift;
  defm vsra : RISCVBinaryAAShift;

  // Narrowing shifts (destination narrower than first source).
  defm vnsrl : RISCVBinaryABShift;
  defm vnsra : RISCVBinaryABShift;

  // Integer comparisons; results are mask vectors.
  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;
  defm vmsgeu : RISCVCompare;
  defm vmsge : RISCVCompare;

  // Integer min/max (unsigned and signed).
  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  // Integer multiply (low and high halves).
  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  // Integer divide/remainder.
  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  // Widening integer multiply.
  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  // Single-width integer multiply-add/sub.
  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  // Widening integer multiply-add.
  defm vwmaccu  : RISCVTernaryWide;
  defm vwmacc   : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;
1456
  // Floating-point add/subtract; all carry a rounding-mode operand.
  defm vfadd : RISCVBinaryAAXRoundingMode;
  defm vfsub : RISCVBinaryAAXRoundingMode;
  defm vfrsub : RISCVBinaryAAXRoundingMode;

  // Widening FP add/subtract; "_w" forms take an already-wide first source.
  defm vfwadd : RISCVBinaryABXRoundingMode;
  defm vfwsub : RISCVBinaryABXRoundingMode;
  defm vfwadd_w : RISCVBinaryAAXRoundingMode;
  defm vfwsub_w : RISCVBinaryAAXRoundingMode;

  // Saturating integer add/subtract.
  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  // Integer merge, selecting elements under the v0 mask.
  defm vmerge : RISCVBinaryWithV0;
1472
  // Whole-vector move/copy.
  // Output: (vector)
  // Input: (passthru, vector_in, vl)
  def int_riscv_vmv_v_v : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMMatchType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    // VL is the third operand (index 2).
    let VLOperand = 2;
  }
  // Splat an XLen scalar into an integer vector; the scalar operand has the
  // element type of the result.
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vmv_v_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    // VL is the third operand (index 2).
    let VLOperand = 2;
  }
  // Splat an FP scalar into a floating-point vector.
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vfmv_v_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    // VL is the third operand (index 2).
    let VLOperand = 2;
  }
1500
  // Read the first element of an integer vector as a scalar.
  // Output: (scalar of the vector's element type)
  // Input: (vector)
  def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                [llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic;
  // Insert a scalar into the first element of an integer vector.
  // Output: (vector)
  // Input: (vector, scalar, vl)
  def int_riscv_vmv_s_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMVectorElementType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    // VL is the third operand (index 2).
    let VLOperand = 2;
  }
1511
  // Read the first element of an FP vector as a scalar.
  // Output: (scalar of the vector's element type)
  // Input: (vector)
  def int_riscv_vfmv_f_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                 [llvm_anyfloat_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic;
  // Insert an FP scalar into the first element of a vector.
  // Output: (vector)
  // Input: (vector, scalar, vl)
  def int_riscv_vfmv_s_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    // VL is the third operand (index 2).
    let VLOperand = 2;
  }
1522
  // FP multiply/divide; all carry a rounding-mode operand.
  defm vfmul : RISCVBinaryAAXRoundingMode;
  defm vfdiv : RISCVBinaryAAXRoundingMode;
  defm vfrdiv : RISCVBinaryAAXRoundingMode;

  // Widening FP multiply.
  defm vfwmul : RISCVBinaryABXRoundingMode;

  // Single-width FP fused multiply-add family.
  defm vfmacc : RISCVTernaryAAXARoundingMode;
  defm vfnmacc : RISCVTernaryAAXARoundingMode;
  defm vfmsac : RISCVTernaryAAXARoundingMode;
  defm vfnmsac : RISCVTernaryAAXARoundingMode;
  defm vfmadd : RISCVTernaryAAXARoundingMode;
  defm vfnmadd : RISCVTernaryAAXARoundingMode;
  defm vfmsub : RISCVTernaryAAXARoundingMode;
  defm vfnmsub : RISCVTernaryAAXARoundingMode;

  // Widening FP fused multiply-add family.
  defm vfwmacc : RISCVTernaryWideRoundingMode;
  defm vfwnmacc : RISCVTernaryWideRoundingMode;
  defm vfwmsac : RISCVTernaryWideRoundingMode;
  defm vfwnmsac : RISCVTernaryWideRoundingMode;

  // FP square root and reciprocal estimates. Note vfrsqrt7 has no
  // rounding-mode operand, unlike vfsqrt and vfrec7.
  defm vfsqrt : RISCVUnaryAARoundingMode;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAARoundingMode;

  // FP min/max.
  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  // FP sign-injection.
  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  // FP classify.
  defm vfclass : RISCVClassify;

  // FP merge under the v0 mask.
  defm vfmerge : RISCVBinaryWithV0;

  // Vector slides (by a scalar offset, and by one element).
  defm vslideup : RVVSlide;
  defm vslidedown : RVVSlide;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  // Register gather (by vector of indices, by scalar index, and by i16
  // index vector).
  defm vrgather_vv : RISCVRGatherVV;
  defm vrgather_vx : RISCVRGatherVX;
  defm vrgatherei16_vv : RISCVRGatherEI16VV;

  // Vector compress (single variant; no "_mask" form).
  def "int_riscv_vcompress" : RISCVCompress;
1571
1572  defm vaaddu : RISCVSaturatingBinaryAAXRoundingMode;
1573  defm vaadd : RISCVSaturatingBinaryAAXRoundingMode;
1574  defm vasubu : RISCVSaturatingBinaryAAXRoundingMode;
1575  defm vasub : RISCVSaturatingBinaryAAXRoundingMode;
1576
1577  defm vsmul : RISCVSaturatingBinaryAAXRoundingMode;
1578
1579  defm vssrl : RISCVSaturatingBinaryAAShiftRoundingMode;
1580  defm vssra : RISCVSaturatingBinaryAAShiftRoundingMode;
1581
1582  defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode;
1583  defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode;
1584
1585  defm vmfeq : RISCVCompare;
1586  defm vmfne : RISCVCompare;
1587  defm vmflt : RISCVCompare;
1588  defm vmfle : RISCVCompare;
1589  defm vmfgt : RISCVCompare;
1590  defm vmfge : RISCVCompare;
1591
1592  defm vredsum : RISCVReduction;
1593  defm vredand : RISCVReduction;
1594  defm vredor : RISCVReduction;
1595  defm vredxor : RISCVReduction;
1596  defm vredminu : RISCVReduction;
1597  defm vredmin : RISCVReduction;
1598  defm vredmaxu : RISCVReduction;
1599  defm vredmax : RISCVReduction;
1600
1601  defm vwredsumu : RISCVReduction;
1602  defm vwredsum : RISCVReduction;
1603
1604  defm vfredosum : RISCVReductionRoundingMode;
1605  defm vfredusum : RISCVReductionRoundingMode;
1606  defm vfredmin : RISCVReduction;
1607  defm vfredmax : RISCVReduction;
1608
1609  defm vfwredusum : RISCVReductionRoundingMode;
1610  defm vfwredosum : RISCVReductionRoundingMode;
1611
1612  def int_riscv_vmand: RISCVBinaryAAAUnMasked;
1613  def int_riscv_vmnand: RISCVBinaryAAAUnMasked;
1614  def int_riscv_vmandn: RISCVBinaryAAAUnMasked;
1615  def int_riscv_vmxor: RISCVBinaryAAAUnMasked;
1616  def int_riscv_vmor: RISCVBinaryAAAUnMasked;
1617  def int_riscv_vmnor: RISCVBinaryAAAUnMasked;
1618  def int_riscv_vmorn: RISCVBinaryAAAUnMasked;
1619  def int_riscv_vmxnor: RISCVBinaryAAAUnMasked;
1620  def int_riscv_vmclr : RISCVNullaryIntrinsic;
1621  def int_riscv_vmset : RISCVNullaryIntrinsic;
1622
1623  defm vcpop : RISCVMaskedUnarySOut;
1624  defm vfirst : RISCVMaskedUnarySOut;
1625  defm vmsbf : RISCVMaskedUnaryMOut;
1626  defm vmsof : RISCVMaskedUnaryMOut;
1627  defm vmsif : RISCVMaskedUnaryMOut;
1628
  // Single-width FP/integer conversions. The "_rtz_" (round-toward-zero)
  // variants use a fixed rounding behavior and therefore take no
  // rounding-mode operand; the others do.
  defm vfcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversionRoundingMode;
  defm vfcvt_f_x_v : RISCVConversionRoundingMode;

  // Widening conversions.
  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;

  // Narrowing conversions; "_rod_" (round-toward-odd) also takes no
  // rounding-mode operand.
  defm vfncvt_f_xu_w : RISCVConversionRoundingMode;
  defm vfncvt_f_x_w : RISCVConversionRoundingMode;
  defm vfncvt_xu_f_w : RISCVConversionRoundingMode;
  defm vfncvt_x_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rod_f_f_w : RISCVConversion;
1652
  // Iota: compute prefix counts from a mask input.
  // Output: (vector)
  // Input: (passthru, mask type input, vl)
  def int_riscv_viota
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty],
                                [IntrNoMem]>, RISCVVIntrinsic {
    // VL is the third operand (index 2).
    let VLOperand = 2;
  }
  // Masked iota.
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl, policy)
  def int_riscv_viota_mask
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>],
                                [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    // VL is the fourth operand (index 3); the policy operand (index 4)
    // must be an immediate (ImmArg).
    let VLOperand = 3;
  }
  // Element-index vector.
  // Output: (vector)
  // Input: (passthru, vl)
  def int_riscv_vid : RISCVID;

  // Masked element-index vector.
  // Output: (vector)
  // Input: (maskedoff, mask, vl, policy)
  def int_riscv_vid_mask
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>],
                                [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    // VL is the third operand (index 2); the policy operand (index 3)
    // must be an immediate (ImmArg).
    let VLOperand = 2;
  }
1688
  // Instantiate segment load/store intrinsics for 2 to 8 fields:
  // vlseg<nf>, vlseg<nf>ff, vlsseg<nf>, vloxseg<nf>, vluxseg<nf>,
  // vsseg<nf>, vssseg<nf>, vsoxseg<nf>, vsuxseg<nf>.
  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }
1700
  // Strided loads/stores for fixed vectors.
  // Load: (passthru, pointer, stride, mask) -> vector; reads memory only.
  def int_riscv_masked_strided_load
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>, llvm_anyptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                [NoCapture<ArgIndex<1>>, IntrReadMem]>;
  // Store: (value, pointer, stride, mask) -> void; writes memory only.
  def int_riscv_masked_strided_store
        : DefaultAttrsIntrinsic<[],
                                [llvm_anyvector_ty, llvm_anyptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
1714
  // Segment loads/stores for fixed vectors.
  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    // Load returns nf vectors of the same type: (pointer, vl).
    def int_riscv_seg # nf # _load
          : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                              !listsplat(LLVMMatchType<0>,
                                              !add(nf, -1))),
                                  [llvm_anyptr_ty, llvm_anyint_ty],
                                  [NoCapture<ArgIndex<0>>, IntrReadMem]>;
    // Store takes nf vectors followed by (pointer, vl); the pointer is
    // operand index nf, hence NoCapture<ArgIndex<nf>>.
    def int_riscv_seg # nf # _store
          : DefaultAttrsIntrinsic<[],
                                  !listconcat([llvm_anyvector_ty],
                                              !listsplat(LLVMMatchType<0>,
                                                          !add(nf, -1)),
                                              [llvm_anyptr_ty, llvm_anyint_ty]),
                                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
  }
1731
1732} // TargetPrefix = "riscv"
1733
1734//===----------------------------------------------------------------------===//
1735// Scalar Cryptography
1736//
1737// These intrinsics will lower directly into the corresponding instructions
// added by the scalar cryptography extension, if the extension is present.
1739
1740let TargetPrefix = "riscv" in {
1741
// (i32, i32, i32 imm) -> i32; the third operand is a byte-select immediate
// (must be a compile-time constant, per ImmArg).
class ScalarCryptoByteSelect32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable,
                             ImmArg<ArgIndex<2>>]>;

// (i32, i32) -> i32; pure and speculatable.
class ScalarCryptoGprGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// (i64, i64) -> i64; pure and speculatable.
class ScalarCryptoGprGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// (i32) -> i32; pure and speculatable.
class ScalarCryptoGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// (i64) -> i64; pure and speculatable.
class ScalarCryptoGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;
1767
// Zknd (AES decryption)
def int_riscv_aes32dsi  : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsi">;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsmi">;

def int_riscv_aes64ds   : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64ds">;
def int_riscv_aes64dsm  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64dsm">;

def int_riscv_aes64im   : ScalarCryptoGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64im">;

// Zkne (AES encryption)
def int_riscv_aes32esi  : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esi">;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esmi">;

def int_riscv_aes64es   : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64es">;
def int_riscv_aes64esm  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64esm">;

// Zknd & Zkne (key schedule)
def int_riscv_aes64ks2  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64ks2">;
// (i64, i32 imm) -> i64; the round-number operand must be an immediate.
def int_riscv_aes64ks1i : DefaultAttrsIntrinsic<[llvm_i64_ty],
                                                [llvm_i64_ty, llvm_i32_ty],
                                                [IntrNoMem, IntrSpeculatable,
                                                 ImmArg<ArgIndex<1>>]>,
                          ClangBuiltin<"__builtin_riscv_aes64ks1i">;

// Zknh (SHA-2 hash functions)
// NOTE(review): these have no ClangBuiltin attached, unlike the AES and
// SHA-512 intrinsics above/below — presumably bound in Clang by other
// means; confirm before relying on a builtin mapping.
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsic32;

def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0l">;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0h">;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1l">;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1h">;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum0r">;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum1r">;

def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig0">;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig1">;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum0">;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum1">;

// Zksed (SM4 block cipher)
def int_riscv_sm4ks      : ScalarCryptoByteSelect32;
def int_riscv_sm4ed      : ScalarCryptoByteSelect32;

// Zksh (SM3 hash function)
def int_riscv_sm3p0      : ScalarCryptoGprIntrinsic32;
def int_riscv_sm3p1      : ScalarCryptoGprIntrinsic32;
1837} // TargetPrefix = "riscv"
1838
1839//===----------------------------------------------------------------------===//
1840// Vendor extensions
1841//===----------------------------------------------------------------------===//
1842include "llvm/IR/IntrinsicsRISCVXTHead.td"
1843include "llvm/IR/IntrinsicsRISCVXsf.td"
1844