//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.
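//
// For example, assuming an i32 access through a default address-space pointer
// (mangled "p0"), the specialized name of the four-argument xchg intrinsic
// would look something like:
//   i32 @llvm.riscv.masked.atomicrmw.xchg.i32.p0(ptr, i32, i32, i32 imm)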

let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }

  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // These intrinsics are intended only for internal compiler use (i.e. as
  // part of AtomicExpandPass via the emitMaskedAtomic*Intrinsic hooks). Their
  // names and semantics could change in the future.

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra operand to do sign extension with.
  // @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(
  //   ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
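
  // As an illustrative sketch (not a definition made here), a masked i32
  // atomicrmw add on a 64-bit target might be emitted as:
  //   %res = call i64 @llvm.riscv.masked.atomicrmw.add.i64.p0(
  //            ptr %addr, i64 %oparg, i64 %mask, i64 7)
  // where the trailing immediate carries the atomic ordering (7 is assumed
  // here to be sequentially consistent in LLVM's AtomicOrdering numbering).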

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Bitmanip (Bit Manipulation) Extension

let TargetPrefix = "riscv" in {

  class BitManipGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;
  class BitManipGPRGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;

  // Zbb
  def int_riscv_orc_b : BitManipGPRIntrinsics;
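  // A sketch of a call, assuming an RV32 target (illustrative only):
  //   %r = call i32 @llvm.riscv.orc.b.i32(i32 %a)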

  // Zbc or Zbkc
  def int_riscv_clmul  : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulh : BitManipGPRGPRIntrinsics;

  // Zbc
  def int_riscv_clmulr : BitManipGPRGPRIntrinsics;

  // Zbkb
  def int_riscv_brev8 : BitManipGPRIntrinsics;
  def int_riscv_zip   : BitManipGPRIntrinsics;
  def int_riscv_unzip : BitManipGPRIntrinsics;

  // Zbkx
  def int_riscv_xperm4  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm8  : BitManipGPRGPRIntrinsics;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vectors

// The intrinsic does not have any operand that must be extended.
defvar NoScalarOperand = 0xF;

// The intrinsic does not have a VL operand.
// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
defvar NoVLOperand = 0x1F;

class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> ScalarOperand = NoScalarOperand;
  bits<5> VLOperand = NoVLOperand;
}

let TargetPrefix = "riscv" in {
  // We use anyint here but we only support XLen.
  def int_riscv_vsetvli   : Intrinsic<[llvm_anyint_ty],
                           /* AVL */  [LLVMMatchType<0>,
                           /* VSEW */  LLVMMatchType<0>,
                           /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem,
                                       ImmArg<ArgIndex<1>>,
                                       ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                            /* VSEW */ [LLVMMatchType<0>,
                            /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem,
                                       ImmArg<ArgIndex<0>>,
                                       ImmArg<ArgIndex<1>>]>;
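
  // An illustrative call, assuming XLen=64, SEW=e32 (vsew encoding 2) and
  // LMUL=m1 (vlmul encoding 0); the encodings come from the V specification:
  //   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 0)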

  // For unit stride mask load
  // Input: (pointer, vl)
  class RISCVUSMLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem, IntrArgMemOnly]>,
          RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unit stride load
  // Input: (passthru, pointer, vl)
  class RISCVUSLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem, IntrArgMemOnly]>,
          RISCVVIntrinsic {
    let VLOperand = 2;
  }
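  // Sketch of a call to a derived intrinsic (assuming the "vle" defm later in
  // this file); a unit-stride load of nxv2i32 might look like:
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(
  //          <vscale x 2 x i32> poison, ptr %p, i64 %vl)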
  // For unit stride fault-only-first load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFF
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>]>,
                    RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  class RISCVUSLoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem,
                     IntrArgMemOnly]>,
                    RISCVVIntrinsic {
    let VLOperand = 3;
  }
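  // The masked variant adds a mask operand and a trailing policy immediate;
  // a sketch (names illustrative):
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(
  //          <vscale x 2 x i32> %maskedoff, ptr %p,
  //          <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  // where the policy immediate carries the tail/mask agnosticism bits.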
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFFMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with passthru operand
  // Input: (passthru, pointer, stride, vl)
  class RISCVSLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl, policy)
  class RISCVSLoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                     LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed load with passthru operand
  // Input: (passthru, pointer, index, vl)
  class RISCVILoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  class RISCVILoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem, IntrArgMemOnly]>,
          RISCVVIntrinsic {
    let VLOperand = 2;
  }
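  // Sketch of a unit-stride store call (assuming the "vse" defm later in
  // this file):
  //   call void @llvm.riscv.vse.nxv2i32.i64(
  //          <vscale x 2 x i32> %val, ptr %p, i64 %vl)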
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem, IntrArgMemOnly]>,
          RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     llvm_anyint_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For destination vector type is the same as source vector.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryAAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For destination vector type is the same as the source vector type
  // Input: (passthru, vector_in, vl, policy)
  class RISCVUnaryAAUnMaskedZvk<bit IsVS>
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, !if(IsVS, llvm_anyvector_ty, LLVMMatchType<0>),
                     llvm_anyint_ty, !if(IsVS, LLVMMatchType<2>, LLVMMatchType<1>)],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  multiclass RISCVUnaryAAUnMaskedZvk<bit HasVV = 1, bit HasVS = 1> {
    if HasVV then
      def "int_riscv_" # NAME # "_vv" : RISCVUnaryAAUnMaskedZvk<IsVS=0>;

    if HasVS then
      def "int_riscv_" # NAME # "_vs" : RISCVUnaryAAUnMaskedZvk<IsVS=1>;
  }
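  // For example, "defm vgmul : RISCVUnaryAAUnMaskedZvk<HasVS=0>" elsewhere in
  // this file would define only int_riscv_vgmul_vv (shown here purely as an
  // illustration of the multiclass, not as a definition made at this point).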
  // For destination vector type is the same as first source vector (with mask).
  // Input: (vector_in, vector_in, mask, vl, policy)
  class RISCVUnaryAAMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as source vector.
  // Input: (passthru, vector_in, frm, vl)
  class RISCVUnaryAAUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first source vector (with mask).
  // Input: (vector_in, vector_in, mask, frm, vl, policy)
  class RISCVUnaryAAMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, vector_in, mask, vl)
  class RISCVCompress
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first and second source vector.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For destination vector type is the same as first and second source vector.
  // Input: (passthru, vector_in, int_vector_in, vl)
  class RISCVRGatherVVUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first and second source vector.
  // Input: (vector_in, vector_in, int_vector_in, vl, policy)
  class RISCVRGatherVVMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, int16_vector_in, vl)
  class RISCVRGatherEI16VVUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first and second source vector.
  // Input: (vector_in, vector_in, int16_vector_in, vl, policy)
  class RISCVRGatherEI16VVMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For destination vector type is the same as first source vector, and the
  // second operand is XLen.
  // Input: (passthru, vector_in, xlen_in, vl)
  class RISCVGatherVXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first source vector (with mask).
  // Second operand is XLen.
  // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
  class RISCVGatherVXMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                    LLVMMatchType<1>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For destination vector type is the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXUnMasked<bit IsVI = 0>
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    !listconcat([IntrNoMem],
                                !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
                    RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
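  // Sketch of a call to an AAX-style intrinsic (assuming the "vadd" defm
  // later in this file):
  //   %v = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(
  //          <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a,
  //          <vscale x 2 x i32> %b, i64 %vl)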
  // For destination vector type is the same as the source vector type.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl, policy)
  class RISCVBinaryAAXUnMaskedZvk<bit IsVI = 0>
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>, LLVMMatchType<0>,
                                 llvm_any_ty, llvm_anyint_ty, LLVMMatchType<2>],
                                !listconcat([ImmArg<ArgIndex<4>>, IntrNoMem],
                                            !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
                                RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For destination vector type is the same as first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAXMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For destination vector type is the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
  class RISCVBinaryAAXUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For destination vector type is the same as first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
  class RISCVBinaryAAXMaskedRoundingMode
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>, LLVMMatchType<2>],
                   [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
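  // Note (informative): the frm operand in the RoundingMode variants is
  // expected to hold a standard RISC-V floating-point rounding-mode encoding
  // (0=rne, 1=rtz, 2=rdn, 3=rup, 4=rmm, 7=dyn); the encoding is defined by
  // the RISC-V specification rather than by this file.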
  // For destination vector type is the same as first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is the same as first source vector (with mask).
  // The second source operand must match the destination type or be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAShiftMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For destination vector type is NOT the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For destination vector type is NOT the same as first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For destination vector type is NOT the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
  class RISCVBinaryABXUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For destination vector type is NOT the same as first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
  class RISCVBinaryABXMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
  // For destination vector type is NOT the same as first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For destination vector type is NOT the same as first source vector (with mask).
  // The second source operand must match the destination type or be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For binary operations with V0 as input.
  // Input: (passthru, vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryWithV0
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                   [llvm_anyvector_ty, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                    llvm_anyint_ty],
                   [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareUnMasked
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVCompareMasked
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For FP classify operations.
  // Output: (bit mask type output)
  // Input: (passthru, vector_in, vl)
  class RISCVClassifyUnMasked
        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                      llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVClassifyMasked
        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For Saturating binary operations with rounding-mode operand
  // The destination vector type is the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryAAXUnMaskedRoundingMode
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryAAXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask and rounding-mode operand
  // The destination vector type is the same as first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryAAXMaskedRoundingMode
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
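  // Note (informative): vxrm here holds the fixed-point rounding mode from
  // the V specification (0=rnu, 1=rne, 2=rdn, 3=rod); it is not defined by
  // this file.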
  // For Saturating binary operations.
  // The destination vector type is the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryAAShiftMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem, IntrHasSideEffects]>,
                    RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // For Saturating binary operations.
  // The destination vector type is NOT the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryABShiftUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is NOT the same as first source vector (with mask).
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryABShiftMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem,
                     IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // Input: (vector_in, vector_in, scalar_in, vl, policy)
  class RVVSlideUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RVVSlideMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // UnMasked Vector Multiply-Add operations; its first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryAAXAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked Vector Multiply-Add operations; its first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryAAXAMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // UnMasked Vector Multiply-Add operations; its first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  class RISCVTernaryAAXAUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked Vector Multiply-Add operations; its first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  class RISCVTernaryAAXAMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
  // UnMasked Widening Vector Multiply-Add operations; its first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryWideUnMasked
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      llvm_anyint_ty, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<4>>, IntrNoMem] >, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked Widening Vector Multiply-Add operations; its first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryWideMasked
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // UnMasked Widening Vector Multiply-Add operations; its first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  class RISCVTernaryWideUnMaskedRoundingMode
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem] >,
                     RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked Widening Vector Multiply-Add operations; its first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  class RISCVTernaryWideMaskedRoundingMode
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                     RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
  // For Reduction ternary operations.
  // For destination vector type is the same as first and third source vector.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Reduction ternary operations with mask.
  // For destination vector type is the same as first and third source vector.
  // The mask type comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
  class RISCVReductionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Reduction ternary operations.
  // For destination vector type is the same as first and third source vector.
  // Input: (vector_in, vector_in, vector_in, frm, vl)
  class RISCVReductionUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Reduction ternary operations with mask.
  // For destination vector type is the same as first and third source vector.
  // The mask type comes from the second source vector.
  // Input: (vector_in, vector_in, vector_in, mask, frm, vl)
  class RISCVReductionMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  class RISCVMaskedUnarySOutUnMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  class RISCVMaskedUnarySOutMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For destination vector type is NOT the same as source vector.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryABUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For destination vector type is NOT the same as source vector (with mask).
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVUnaryABMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  class RISCVMaskedUnaryMOutMasked
        : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (vl)
  class RISCVNullaryIntrinsic
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 0;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  class RISCVID
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For Conversion unary operations.
  // Input: (passthru, vector_in, vl)
  class RISCVConversionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVConversionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Conversion unary operations.
  // Input: (passthru, vector_in, frm, vl)
  class RISCVConversionUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, frm, vl, policy)
  class RISCVConversionMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }

  // For unit stride segment load
  // Input: (passthru, pointer, vl)
  class RISCVUSSegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem, IntrArgMemOnly]>,
          RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
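  // As an illustration, with nf=2 the class above yields two result values
  // and a signature along these lines (types abbreviated, sketch only):
  //   {v, v} @llvm.riscv.vlseg2...(v %passthru0, v %passthru1, ptr %p, ixlen %vl)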
1004  // For unit stride segment load with mask
1005  // Input: (maskedoff, pointer, mask, vl, policy)
1006  class RISCVUSSegLoadMasked<int nf>
1007        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
1008                                !add(nf, -1))),
1009                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
1010                                [llvm_ptr_ty,
1011                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1012                                 llvm_anyint_ty, LLVMMatchType<1>]),
1013                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>,
1014                     IntrReadMem, IntrArgMemOnly]>,
1015          RISCVVIntrinsic {
1016    let VLOperand = !add(nf, 2);
1017  }
1018
1019  // For unit stride fault-only-first segment load
1020  // Input: (passthru, pointer, vl)
1021  // Output: (data, vl)
1022  // NOTE: We model this with default memory properties since we model writing
1023  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
1024  class RISCVUSSegLoadFF<int nf>
1025        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
1026                                !add(nf, -1)), [llvm_anyint_ty]),
1027                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
1028                    [llvm_ptr_ty, LLVMMatchType<1>]),
1029                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic {
1030    let VLOperand = !add(nf, 1);
1031  }
1032  // For unit stride fault-only-first segment load with mask
1033  // Input: (maskedoff, pointer, mask, vl, policy)
1034  // Output: (data, vl)
1035  // NOTE: We model this with default memory properties since we model writing
1036  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
1037  class RISCVUSSegLoadFFMasked<int nf>
1038        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
1039                                !add(nf, -1)), [llvm_anyint_ty]),
1040                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
1041                     [llvm_ptr_ty,
1042                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1043                      LLVMMatchType<1>, LLVMMatchType<1>]),
1044                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
1045                    RISCVVIntrinsic {
1046    let VLOperand = !add(nf, 2);
1047  }
1048
1049  // For stride segment load
1050  // Input: (passthru, pointer, offset, vl)
1051  class RISCVSSegLoad<int nf>
1052        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
1053                                !add(nf, -1))),
1054                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
1055                    [llvm_ptr_ty, llvm_anyint_ty, LLVMMatchType<1>]),
1056                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
1057    let VLOperand = !add(nf, 2);
1058  }
1059  // For stride segment load with mask
1060  // Input: (maskedoff, pointer, offset, mask, vl, policy)
1061  class RISCVSSegLoadMasked<int nf>
1062        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
1063                                !add(nf, -1))),
1064                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
1065                                [llvm_ptr_ty,
1066                                 llvm_anyint_ty,
1067                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1068                                 LLVMMatchType<1>, LLVMMatchType<1>]),
1069                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
1070                    RISCVVIntrinsic {
1071    let VLOperand = !add(nf, 3);
1072  }
1073
1074  // For indexed segment load
1075  // Input: (passthru, pointer, index, vl)
1076  class RISCVISegLoad<int nf>
1077        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
1078                                !add(nf, -1))),
1079                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
1080                    [llvm_ptr_ty, llvm_anyvector_ty, llvm_anyint_ty]),
1081                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
1082    let VLOperand = !add(nf, 2);
1083  }
1084  // For indexed segment load with mask
1085  // Input: (maskedoff, pointer, index, mask, vl, policy)
1086  class RISCVISegLoadMasked<int nf>
1087        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
1088                                !add(nf, -1))),
1089                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
1090                                [llvm_ptr_ty,
1091                                 llvm_anyvector_ty,
1092                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
1093                                 llvm_anyint_ty, LLVMMatchType<2>]),
1094                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
1095                    RISCVVIntrinsic {
1096    let VLOperand = !add(nf, 3);
1097  }
1098
1099  // For unit stride segment store
1100  // Input: (value, pointer, vl)
1101  class RISCVUSSegStore<int nf>
1102        : DefaultAttrsIntrinsic<[],
1103                    !listconcat([llvm_anyvector_ty],
1104                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
1105                                [llvm_ptr_ty, llvm_anyint_ty]),
1106                    [NoCapture<ArgIndex<nf>>, IntrWriteMem, IntrArgMemOnly]>,
1107          RISCVVIntrinsic {
1108    let VLOperand = !add(nf, 1);
1109  }
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl)
  class RISCVUSSegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem, IntrArgMemOnly]>,
          RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For strided segment store
  // Input: (value, pointer, stride, vl)
  class RISCVSSegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyint_ty,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For strided segment store with mask
  // Input: (value, pointer, stride, mask, vl)
  class RISCVSSegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For indexed segment store
  // Input: (value, pointer, index, vl)
  class RISCVISegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyvector_ty,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl)
  class RISCVISegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked;
  }
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked;
  }

  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked;
  }
  multiclass RISCVUnaryAARoundingMode {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMaskedRoundingMode;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked;
  }
  // AAX means the destination type (A) is the same as the first source
  // type (A); X means the second source operand may be any type.
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
  }
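  // As an illustrative example (assuming the (passthru, op1, op2, vl)
  // operand order of RISCVBinaryAAXUnMasked defined earlier in this file),
  // the vadd instance below specializes on RV64 to:
  //   <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(
  //       <vscale x 2 x i32> passthru, <vscale x 2 x i32> op1,
  //       <vscale x 2 x i32> op2, i64 vl)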
  multiclass RISCVBinaryAAXRoundingMode {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode;
  }
  // Like RISCVBinaryAAX, but the second operand is used as a shift amount,
  // so it must be a vector or an XLen scalar.
  multiclass RISCVBinaryAAShift {
    def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked;
  }
  multiclass RISCVRGatherVV {
    def "int_riscv_" # NAME : RISCVRGatherVVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked;
  }
  multiclass RISCVRGatherVX {
    def "int_riscv_" # NAME : RISCVGatherVXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked;
  }
  multiclass RISCVRGatherEI16VV {
    def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked;
  }
  // ABX means the destination type (A) is different from the first source
  // type (B); X means the second source operand may be any type.
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked;
  }
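  // Illustratively (under the same operand-order assumption as above), the
  // widening vwadd instance specializes on RV64 to:
  //   <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(
  //       <vscale x 1 x i16> passthru, <vscale x 1 x i8> op1,
  //       <vscale x 1 x i8> op2, i64 vl)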
  multiclass RISCVBinaryABXRoundingMode {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMaskedRoundingMode;
  }
  // Like RISCVBinaryABX, but the second operand is used as a shift amount,
  // so it must be a vector or an XLen scalar.
  multiclass RISCVBinaryABShift {
    def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked;
  }
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
  }
  multiclass RISCVSaturatingBinaryAAXRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode;
  }
  multiclass RISCVSaturatingBinaryAAShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMaskedRoundingMode;
  }
  multiclass RISCVSaturatingBinaryABShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMaskedRoundingMode;
  }
  multiclass RVVSlide {
    def "int_riscv_" # NAME : RVVSlideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
  }
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked;
  }
  multiclass RISCVTernaryAAXARoundingMode {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMaskedRoundingMode;
  }
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked;
  }
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked;
  }
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMasked;
  }
  multiclass RISCVTernaryWideRoundingMode {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMaskedRoundingMode;
  }
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked;
  }
  multiclass RISCVReductionRoundingMode {
    def "int_riscv_" # NAME : RISCVReductionUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMaskedRoundingMode;
  }
  multiclass RISCVMaskedUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked;
  }
  multiclass RISCVMaskedUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked;
  }
  multiclass RISCVConversion {
    def "int_riscv_" # NAME : RISCVConversionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked;
  }
  multiclass RISCVConversionRoundingMode {
    def "int_riscv_" # NAME : RISCVConversionUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMaskedRoundingMode;
  }
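  // For example (illustrative; assuming the (passthru, src, vl) operand
  // order of RISCVConversionUnMasked defined earlier), the vfcvt_rtz_x_f_v
  // instance below specializes on RV64 to:
  //   <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(
  //       <vscale x 1 x i32> passthru, <vscale x 1 x float> src, i64 vl)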
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked<nf>;
  }
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked<nf>;
  }
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked<nf>;
  }
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked<nf>;
  }
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked<nf>;
  }
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked<nf>;
  }
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked<nf>;
  }

  defm vle : RISCVUSLoad;
  defm vleff : RISCVUSLoadFF;
  defm vse : RISCVUSStore;
  defm vlse : RISCVSLoad;
  defm vsse : RISCVSStore;
  defm vluxei : RISCVILoad;
  defm vloxei : RISCVILoad;
  defm vsoxei : RISCVIStore;
  defm vsuxei : RISCVIStore;

  def int_riscv_vlm : RISCVUSMLoad;
  def int_riscv_vsm : RISCVUSStore;

  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;

  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;

  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;

  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;

  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  defm vsll : RISCVBinaryAAShift;
  defm vsrl : RISCVBinaryAAShift;
  defm vsra : RISCVBinaryAAShift;

  defm vnsrl : RISCVBinaryABShift;
  defm vnsra : RISCVBinaryABShift;

  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;
  defm vmsgeu : RISCVCompare;
  defm vmsge : RISCVCompare;

  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  defm vwmaccu  : RISCVTernaryWide;
  defm vwmacc   : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;

  defm vfadd : RISCVBinaryAAXRoundingMode;
  defm vfsub : RISCVBinaryAAXRoundingMode;
  defm vfrsub : RISCVBinaryAAXRoundingMode;

  defm vfwadd : RISCVBinaryABXRoundingMode;
  defm vfwsub : RISCVBinaryABXRoundingMode;
  defm vfwadd_w : RISCVBinaryAAXRoundingMode;
  defm vfwsub_w : RISCVBinaryAAXRoundingMode;

  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  defm vmerge : RISCVBinaryWithV0;

  // Output: (vector)
  // Input: (passthru, vector_in, vl)
  def int_riscv_vmv_v_v : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMMatchType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
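  // Illustratively, one specialization of this intrinsic on RV64 would be:
  //   <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(
  //       <vscale x 1 x i8> passthru, <vscale x 1 x i8> src, i64 vl)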
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vmv_v_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vfmv_v_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                [llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_s_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMVectorElementType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vfmv_f_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                 [llvm_anyfloat_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_s_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  defm vfmul : RISCVBinaryAAXRoundingMode;
  defm vfdiv : RISCVBinaryAAXRoundingMode;
  defm vfrdiv : RISCVBinaryAAXRoundingMode;

  defm vfwmul : RISCVBinaryABXRoundingMode;

  defm vfmacc : RISCVTernaryAAXARoundingMode;
  defm vfnmacc : RISCVTernaryAAXARoundingMode;
  defm vfmsac : RISCVTernaryAAXARoundingMode;
  defm vfnmsac : RISCVTernaryAAXARoundingMode;
  defm vfmadd : RISCVTernaryAAXARoundingMode;
  defm vfnmadd : RISCVTernaryAAXARoundingMode;
  defm vfmsub : RISCVTernaryAAXARoundingMode;
  defm vfnmsub : RISCVTernaryAAXARoundingMode;

  defm vfwmacc : RISCVTernaryWideRoundingMode;
  defm vfwmaccbf16 : RISCVTernaryWideRoundingMode;
  defm vfwnmacc : RISCVTernaryWideRoundingMode;
  defm vfwmsac : RISCVTernaryWideRoundingMode;
  defm vfwnmsac : RISCVTernaryWideRoundingMode;

  defm vfsqrt : RISCVUnaryAARoundingMode;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAARoundingMode;

  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  defm vfclass : RISCVClassify;

  defm vfmerge : RISCVBinaryWithV0;

  defm vslideup : RVVSlide;
  defm vslidedown : RVVSlide;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  defm vrgather_vv : RISCVRGatherVV;
  defm vrgather_vx : RISCVRGatherVX;
  defm vrgatherei16_vv : RISCVRGatherEI16VV;

  def "int_riscv_vcompress" : RISCVCompress;

  defm vaaddu : RISCVSaturatingBinaryAAXRoundingMode;
  defm vaadd : RISCVSaturatingBinaryAAXRoundingMode;
  defm vasubu : RISCVSaturatingBinaryAAXRoundingMode;
  defm vasub : RISCVSaturatingBinaryAAXRoundingMode;

  defm vsmul : RISCVSaturatingBinaryAAXRoundingMode;

  defm vssrl : RISCVSaturatingBinaryAAShiftRoundingMode;
  defm vssra : RISCVSaturatingBinaryAAShiftRoundingMode;

  defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode;
  defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode;

  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;

  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  defm vfredosum : RISCVReductionRoundingMode;
  defm vfredusum : RISCVReductionRoundingMode;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  defm vfwredusum : RISCVReductionRoundingMode;
  defm vfwredosum : RISCVReductionRoundingMode;

  def int_riscv_vmand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmandn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmorn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;
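  // As a sketch (assuming the (op1, op2, vl) operand list of
  // RISCVBinaryAAAUnMasked defined earlier), vmand specializes on RV64 to:
  //   <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(
  //       <vscale x 1 x i1> op1, <vscale x 1 x i1> op2, i64 vl)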

  defm vcpop : RISCVMaskedUnarySOut;
  defm vfirst : RISCVMaskedUnarySOut;
  defm vmsbf : RISCVMaskedUnaryMOut;
  defm vmsof : RISCVMaskedUnaryMOut;
  defm vmsif : RISCVMaskedUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversionRoundingMode;
  defm vfcvt_f_x_v : RISCVConversionRoundingMode;

  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;
  defm vfwcvtbf16_f_f_v : RISCVConversion;

  defm vfncvt_f_xu_w : RISCVConversionRoundingMode;
  defm vfncvt_f_x_w : RISCVConversionRoundingMode;
  defm vfncvt_xu_f_w : RISCVConversionRoundingMode;
  defm vfncvt_x_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversionRoundingMode;
  defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (passthru, mask type input, vl)
  def int_riscv_viota
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty],
                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
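  // For example (illustrative), one specialization on RV64 would be:
  //   <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i64(
  //       <vscale x 1 x i8> passthru, <vscale x 1 x i1> mask_input, i64 vl)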
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl, policy)
  def int_riscv_viota_mask
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>],
                                [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  def int_riscv_vid : RISCVID;

  // Output: (vector)
  // Input: (maskedoff, mask, vl, policy)
  def int_riscv_vid_mask
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>],
                                [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }
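  // For example, the nf=2 iteration above defines int_riscv_vlseg2,
  // int_riscv_vlseg2ff, int_riscv_vlsseg2, int_riscv_vloxseg2,
  // int_riscv_vluxseg2, and the corresponding segment store intrinsics,
  // each paired with its "_mask" variant.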

  // Strided loads/stores for fixed vectors.
  def int_riscv_masked_strided_load
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>, llvm_anyptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                [NoCapture<ArgIndex<1>>, IntrReadMem]>;
  def int_riscv_masked_strided_store
        : DefaultAttrsIntrinsic<[],
                                [llvm_anyvector_ty, llvm_anyptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
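  // An illustrative specialization for a fixed 4 x i32 vector on RV64:
  //   <4 x i32> @llvm.riscv.masked.strided.load.v4i32.p0.i64(
  //       <4 x i32> passthru, ptr base, i64 stride, <4 x i1> mask)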

  // Segment loads/stores for fixed vectors.
  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    def int_riscv_seg # nf # _load
          : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                              !listsplat(LLVMMatchType<0>,
                                              !add(nf, -1))),
                                  [llvm_anyptr_ty, llvm_anyint_ty],
                                  [NoCapture<ArgIndex<0>>, IntrReadMem]>;
    def int_riscv_seg # nf # _store
          : DefaultAttrsIntrinsic<[],
                                  !listconcat([llvm_anyvector_ty],
                                              !listsplat(LLVMMatchType<0>,
                                                          !add(nf, -1)),
                                              [llvm_anyptr_ty, llvm_anyint_ty]),
                                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
  }
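  // e.g. for nf=2 (illustrative, RV64):
  //   {<4 x i32>, <4 x i32>}
  //     @llvm.riscv.seg2.load.v4i32.p0.i64(ptr base, i64 vl)
  //   void @llvm.riscv.seg2.store.v4i32.p0.i64(
  //       <4 x i32> v0, <4 x i32> v1, ptr base, i64 vl)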

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Scalar Cryptography
//
// These intrinsics will lower directly into the corresponding instructions
// added by the scalar cryptography extension, if the extension is present.

let TargetPrefix = "riscv" in {

class ScalarCryptoByteSelect32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable,
                             ImmArg<ArgIndex<2>>]>;
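// For instance, the Zkne instance aes32esi below has the (non-overloaded)
// signature:
//   i32 @llvm.riscv.aes32esi(i32 rs1, i32 rs2, i32 bs)
// where bs must be a constant byte-select immediate (ImmArg on operand 2).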

class ScalarCryptoGprGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;
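// e.g. the Zknh instance sha512sig0l below has the signature:
//   i32 @llvm.riscv.sha512sig0l(i32 rs1, i32 rs2)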

class ScalarCryptoGprGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// Zknd
def int_riscv_aes32dsi  : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsi">;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsmi">;

def int_riscv_aes64ds   : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64ds">;
def int_riscv_aes64dsm  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64dsm">;

def int_riscv_aes64im   : ScalarCryptoGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64im">;

// Zkne
def int_riscv_aes32esi  : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esi">;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esmi">;

def int_riscv_aes64es   : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64es">;
def int_riscv_aes64esm  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64esm">;

// Zknd & Zkne
def int_riscv_aes64ks2  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64ks2">;
def int_riscv_aes64ks1i : DefaultAttrsIntrinsic<[llvm_i64_ty],
                                                [llvm_i64_ty, llvm_i32_ty],
                                                [IntrNoMem, IntrSpeculatable,
                                                 ImmArg<ArgIndex<1>>]>,
                          ClangBuiltin<"__builtin_riscv_aes64ks1i">;

// Zknh
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsic32;

def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0l">;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0h">;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1l">;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1h">;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum0r">;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum1r">;

def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig0">;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig1">;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum0">;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum1">;

// Zksed
def int_riscv_sm4ks      : ScalarCryptoByteSelect32;
def int_riscv_sm4ed      : ScalarCryptoByteSelect32;

// Zksh
def int_riscv_sm3p0      : ScalarCryptoGprIntrinsic32;
def int_riscv_sm3p1      : ScalarCryptoGprIntrinsic32;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vector Cryptography
//
// These intrinsics will lower directly into the corresponding instructions
// added by the vector cryptography extension, if the extension is present.
let TargetPrefix = "riscv" in {
  // Zvkb
  defm vandn             : RISCVBinaryAAX;
  defm vbrev8            : RISCVUnaryAA;
  defm vrev8             : RISCVUnaryAA;
  defm vrol              : RISCVBinaryAAX;
  defm vror              : RISCVBinaryAAX;

  // Zvbb
  defm vbrev             : RISCVUnaryAA;
  defm vclz              : RISCVUnaryAA;
  defm vctz              : RISCVUnaryAA;
  defm vcpopv            : RISCVUnaryAA;
  defm vwsll             : RISCVBinaryABX;

  // Zvbc
  defm vclmul            : RISCVBinaryAAX;
  defm vclmulh           : RISCVBinaryAAX;

  // Zvkg
  def int_riscv_vghsh    : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vgmul_vv : RISCVUnaryAAUnMaskedZvk<IsVS=0>;

  // Zvkned
  defm vaesdf            : RISCVUnaryAAUnMaskedZvk;
  defm vaesdm            : RISCVUnaryAAUnMaskedZvk;
  defm vaesef            : RISCVUnaryAAUnMaskedZvk;
  defm vaesem            : RISCVUnaryAAUnMaskedZvk;
  def int_riscv_vaeskf1  : RISCVBinaryAAXUnMasked<IsVI=1>;
  def int_riscv_vaeskf2  : RISCVBinaryAAXUnMaskedZvk<IsVI=1>;
  defm vaesz             : RISCVUnaryAAUnMaskedZvk<HasVV=0>;

  // Zvknha or Zvknhb
  def int_riscv_vsha2ch  : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vsha2cl  : RISCVBinaryAAXUnMaskedZvk;
  def int_riscv_vsha2ms  : RISCVBinaryAAXUnMaskedZvk;

  // Zvksed
  def int_riscv_vsm4k    : RISCVBinaryAAXUnMasked<IsVI=1>;
  defm vsm4r             : RISCVUnaryAAUnMaskedZvk;

  // Zvksh
  def int_riscv_vsm3c    : RISCVBinaryAAXUnMaskedZvk<IsVI=1>;
  def int_riscv_vsm3me   : RISCVBinaryAAXUnMasked;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vendor extensions
//===----------------------------------------------------------------------===//
include "llvm/IR/IntrinsicsRISCVXTHead.td"
include "llvm/IR/IntrinsicsRISCVXsf.td"
include "llvm/IR/IntrinsicsRISCVXCV.td"