1//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// This file contains the required infrastructure to support code generation
10/// for the standard 'V' (Vector) extension, version 0.10.  This version is still
11/// experimental as the 'V' extension hasn't been ratified yet.
12///
13/// This file is included from RISCVInstrInfoV.td
14///
15//===----------------------------------------------------------------------===//
16
// Move element 0 of an integer vector into a scalar integer register.
def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
                           SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
                                                SDTCisInt<1>]>>;
// Read the VLENB CSR (vector register length in bytes) as an XLen scalar.
def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
                              SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;

// Unit-stride fault-only-first load: operands are (chain, pointer, vl).
// Marked with side effects because it may implicitly update vl.
def riscv_vleff : SDNode<"RISCVISD::VLEFF",
                         SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisPtrTy<1>,
                                              SDTCisVT<2, XLenVT>]>,
                         [SDNPHasChain, SDNPOutGlue, SDNPMayLoad,
                          SDNPSideEffect]>;
// Masked fault-only-first load: operands are (chain, merge, pointer, mask, vl).
def riscv_vleff_mask : SDNode<"RISCVISD::VLEFF_MASK",
                              SDTypeProfile<1, 4, [SDTCisVec<0>,
                                                   SDTCisSameAs<0, 1>,
                                                   SDTCisPtrTy<2>,
                                                   SDTCVecEltisVT<3, i1>,
                                                   SDTCisVT<4, XLenVT>]>,
                              [SDNPHasChain, SDNPOutGlue, SDNPMayLoad,
                               SDNPSideEffect]>;
// Read the current VL CSR; consumes glue so it can be chained directly after
// a vleff that updated vl.
def riscv_read_vl : SDNode<"RISCVISD::READ_VL",
                           SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>,
                           [SDNPInGlue]>;
39
// X0 has special meaning for vsetvl/vsetvli.
//  rd | rs1 |   AVL value | Effect on vl
//--------------------------------------------------------------
// !X0 |  X0 |       VLMAX | Set vl to VLMAX
//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.

// Matches the AVL operand of a pseudo; resolved by selectVLOp in the
// instruction selector (which understands the X0/VLMAX convention above).
def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;
46
// SDNodeXForm producing the input immediate minus one, preserving its type.
def DecImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
                                   N->getValueType(0));
}]>;
51
52//===----------------------------------------------------------------------===//
53// Utilities.
54//===----------------------------------------------------------------------===//
55
// This class describes information associated with an LMUL (vector register
// group multiplier) setting: its vtype encoding, the register classes used
// at this LMUL and at related widened/narrowed LMULs, and its name suffix.
class LMULInfo<int lmul, VReg regclass, VReg wregclass,
               VReg f2regclass, VReg f4regclass, VReg f8regclass, string mx> {
  bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
  VReg vrclass = regclass;   // Register class at this LMUL.
  VReg wvrclass = wregclass; // Register class at 2*LMUL (widening ops).
  VReg f8vrclass = f8regclass; // Register class at LMUL/8.
  VReg f4vrclass = f4regclass; // Register class at LMUL/4.
  VReg f2vrclass = f2regclass; // Register class at LMUL/2.
  string MX = mx; // Name suffix, e.g. "M1", "MF8".
}
67
// Associate LMUL with tablegen records of register classes.
// Where a widened/narrowed LMUL does not exist (e.g. 2*M8), plain VR is used
// as a placeholder (marked /*NoVReg*/).
def V_M1  : LMULInfo<0b000,   VR,        VRM2,   VR,   VR, VR, "M1">;
def V_M2  : LMULInfo<0b001, VRM2,        VRM4,   VR,   VR, VR, "M2">;
def V_M4  : LMULInfo<0b010, VRM4,        VRM8, VRM2,   VR, VR, "M4">;
def V_M8  : LMULInfo<0b011, VRM8,/*NoVReg*/VR, VRM4, VRM2, VR, "M8">;

def V_MF8 : LMULInfo<0b101, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF8">;
def V_MF4 : LMULInfo<0b110, VR, VR,          VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF4">;
def V_MF2 : LMULInfo<0b111, VR, VR,          VR,          VR,/*NoVReg*/VR, "MF2">;
77
// Used to iterate over all possible LMULs, from smallest to largest.
def MxList {
  list<LMULInfo> m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
}
82
// Pairs a scalar FP register class with its pseudo-name suffix (e.g. "F32").
class FPR_Info<RegisterClass regclass, string fx> {
  RegisterClass fprclass = regclass;
  string FX = fx;
}

def SCALAR_F16 : FPR_Info<FPR16, "F16">;
def SCALAR_F32 : FPR_Info<FPR32, "F32">;
def SCALAR_F64 : FPR_Info<FPR64, "F64">;

// Used to iterate over all supported scalar FP types.
def FPList {
  list<FPR_Info> fpinfo = [SCALAR_F16, SCALAR_F32, SCALAR_F64];
}
95
// The LMULs usable for a given element width: wider elements exclude the
// smallest fractional LMULs (e.g. EEW=64 supports only M1..M8).
class MxSet<int eew> {
  list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
                           !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
}
102
// Valid NF (number of fields) values for segment load/store at LMUL m,
// chosen so that NF * LMUL <= 8 (hence M8 supports no segments at all).
class NFSet<LMULInfo m> {
  list<int> L = !cond(!eq(m.value, V_M8.value): [],
                      !eq(m.value, V_M4.value): [2],
                      !eq(m.value, V_M2.value): [2, 3, 4],
                      true: [2, 3, 4, 5, 6, 7, 8]);
}
109
// Computes log2(num) by recursion; num is assumed to be a power of two
// (the recursion terminates when the shifted value reaches 1).
class shift_amount<int num> {
  int val = !if(!eq(num, 1), 0, !add(1, shift_amount<!srl(num, 1)>.val));
}
113
// Maps an LMUL name suffix to 8*LMUL so fractional multipliers can be
// handled with plain integer arithmetic (MF8 -> 1 ... M8 -> 64).
class octuple_from_str<string MX> {
  int ret = !cond(!eq(MX, "MF8") : 1,
                  !eq(MX, "MF4") : 2,
                  !eq(MX, "MF2") : 4,
                  !eq(MX, "M1") : 8,
                  !eq(MX, "M2") : 16,
                  !eq(MX, "M4") : 32,
                  !eq(MX, "M8") : 64);
}
123
// Inverse of octuple_from_str: maps 8*LMUL back to the LMUL name suffix.
// Returns "NoDef" for values that do not correspond to a valid LMUL.
// Uses !cond for consistency with octuple_from_str above.
class octuple_to_str<int octuple> {
  string ret = !cond(!eq(octuple, 1) : "MF8",
                     !eq(octuple, 2) : "MF4",
                     !eq(octuple, 4) : "MF2",
                     !eq(octuple, 8) : "M1",
                     !eq(octuple, 16) : "M2",
                     !eq(octuple, 32) : "M4",
                     !eq(octuple, 64) : "M8",
                     true : "NoDef");
}
134
// Output pattern for X0 used to represent VLMAX in the pseudo instructions
// (see the vsetvli X0 convention documented above VLOp).
def VLMax : OutPatFrag<(ops), (XLenVT X0)>;

// List of EEW (effective element widths in bits).
defvar EEWList = [8, 16, 32, 64];
140
// Register class for an NF-field segment tuple at LMUL m. Fractional LMULs
// have no dedicated tuple classes, so they fall back to the M1 tuple class
// (e.g. "VRN2M1").
class SegRegClass<LMULInfo m, int nf> {
  VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
                                           !eq(m.value, V_MF4.value): V_M1.MX,
                                           !eq(m.value, V_MF2.value): V_M1.MX,
                                           true: m.MX));
}
147
148//===----------------------------------------------------------------------===//
149// Vector register and vector group type information.
150//===----------------------------------------------------------------------===//
151
// Bundles everything codegen needs to know about one vector value type:
// the vector and mask ValueTypes, SEW, register class, LMUL, and the
// corresponding scalar type/register class.
class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR>
{
  ValueType Vector = Vec;
  ValueType Mask = Mas;
  int SEW = Sew;
  VReg RegClass = Reg;
  LMULInfo LMul = M;
  ValueType Scalar = Scal;
  RegisterClass ScalarRegClass = ScalarReg;
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this type. For scalable vectors this is VLMax.
  OutPatFrag AVL = VLMax;

  // Suffix used in scalar-operand pseudo names: "X" for integer, else the
  // FP width (e.g. "F32").
  string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
                              !eq(Scal, f16) : "F16",
                              !eq(Scal, f32) : "F32",
                              !eq(Scal, f64) : "F64");
}

// VTypeInfo for LMUL > 1 types; additionally records the LMUL=1 vector type
// with the same SEW (needed e.g. for reductions and extract/insert).
class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
                     VReg Reg, LMULInfo M, ValueType Scal = XLenVT,
                     RegisterClass ScalarReg = GPR>
    : VTypeInfo<Vec, Mas, Sew, Reg, M, Scal, ScalarReg>
{
  ValueType VectorM1 = VecM1;
}
179
// All supported vector types, partitioned into integer/float and
// LMUL<=1 ("NoGroup") vs LMUL>1 ("Group") subsets for pattern iteration.
defset list<VTypeInfo> AllVectors = {
  defset list<VTypeInfo> AllIntegerVectors = {
    defset list<VTypeInfo> NoGroupIntegerVectors = {
      def VI8MF8: VTypeInfo<vint8mf8_t,  vbool64_t,  8, VR, V_MF8>;
      def VI8MF4: VTypeInfo<vint8mf4_t,  vbool32_t,  8, VR, V_MF4>;
      def VI8MF2: VTypeInfo<vint8mf2_t,  vbool16_t,  8, VR, V_MF2>;
      def VI8M1: VTypeInfo<vint8m1_t,   vbool8_t,   8, VR, V_M1>;
      def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
      def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
      def VI16M1: VTypeInfo<vint16m1_t,  vbool16_t, 16, VR, V_M1>;
      def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
      def VI32M1: VTypeInfo<vint32m1_t,  vbool32_t, 32, VR, V_M1>;
      def VI64M1: VTypeInfo<vint64m1_t,  vbool64_t, 64, VR, V_M1>;
    }
    defset list<GroupVTypeInfo> GroupIntegerVectors = {
      def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
      def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
      def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;

      def VI16M2: GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
      def VI16M4: GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
      def VI16M8: GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;

      def VI32M2: GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
      def VI32M4: GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
      def VI32M8: GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;

      def VI64M2: GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
      def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
      def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
    }
  }

  defset list<VTypeInfo> AllFloatVectors = {
    defset list<VTypeInfo> NoGroupFloatVectors = {
      def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
      def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
      def VF16M1:  VTypeInfo<vfloat16m1_t,  vbool16_t, 16, VR, V_M1,  f16, FPR16>;

      def VF32MF2: VTypeInfo<vfloat32mf2_t,vbool64_t, 32, VR, V_MF2, f32, FPR32>;
      def VF32M1:  VTypeInfo<vfloat32m1_t, vbool32_t, 32, VR, V_M1,  f32, FPR32>;

      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, VR, V_M1, f64, FPR64>;
    }

    defset list<GroupVTypeInfo> GroupFloatVectors = {
      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
                                  VRM2, V_M2, f16, FPR16>;
      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
                                  VRM4, V_M4, f16, FPR16>;
      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
                                  VRM8, V_M8, f16, FPR16>;

      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
                                  VRM2, V_M2, f32, FPR32>;
      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t,  32,
                                  VRM4, V_M4, f32, FPR32>;
      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t,  32,
                                  VRM8, V_M8, f32, FPR32>;

      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
                                  VRM2, V_M2, f64, FPR64>;
      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
                                  VRM4, V_M4, f64, FPR64>;
      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t,  64,
                                  VRM8, V_M8, f64, FPR64>;
    }
  }
}
249
// This functor is used to obtain the int vector type that has the same SEW and
// multiplier as the input parameter type. It relies on the VF*/VI* record
// naming convention established in AllVectors above.
class GetIntVTypeInfo<VTypeInfo vti>
{
  // Equivalent integer vector type. Eg.
  //   VI8M1 → VI8M1 (identity)
  //   VF64M4 → VI64M4
  VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
}
259
// Per-mask-type information: the mask ValueType, an LMUL that (with SEW=8)
// yields a valid vtype for operating on this mask, and the "B<n>" suffix.
class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
  ValueType Mask = Mas;
  // {SEW, VLMul} values set a valid VType to deal with this mask type.
  // we assume SEW=8 and set corresponding LMUL.
  int SEW = 8;
  LMULInfo LMul = M;
  string BX = Bx; // Appendix of mask operations.
  // The pattern fragment which produces the AVL operand, representing the
  // "natural" vector length for this mask type. For scalable masks this is
  // VLMax.
  OutPatFrag AVL = VLMax;
}

defset list<MTypeInfo> AllMasks = {
  // vbool<n>_t, <n> = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
  def : MTypeInfo<vbool64_t, V_MF8, "B1">;
  def : MTypeInfo<vbool32_t, V_MF4, "B2">;
  def : MTypeInfo<vbool16_t, V_MF2, "B4">;
  def : MTypeInfo<vbool8_t, V_M1, "B8">;
  def : MTypeInfo<vbool4_t, V_M2, "B16">;
  def : MTypeInfo<vbool2_t, V_M4, "B32">;
  def : MTypeInfo<vbool1_t, V_M8, "B64">;
}
283
// Pairs a vector type with its widened (2*SEW) counterpart.
class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Wti = wti;
}

// Pairs a vector type with a narrower (fractional-SEW) counterpart.
class VTypeInfoToFraction<VTypeInfo vti, VTypeInfo fti>
{
  VTypeInfo Vti = vti;
  VTypeInfo Fti = fti;
}
295
// Integer types that can be widened to 2*SEW at 2*LMUL (used by widening ops).
defset list<VTypeInfoToWide> AllWidenableIntVectors = {
  def : VTypeInfoToWide<VI8MF8,  VI16MF4>;
  def : VTypeInfoToWide<VI8MF4,  VI16MF2>;
  def : VTypeInfoToWide<VI8MF2,  VI16M1>;
  def : VTypeInfoToWide<VI8M1,   VI16M2>;
  def : VTypeInfoToWide<VI8M2,   VI16M4>;
  def : VTypeInfoToWide<VI8M4,   VI16M8>;

  def : VTypeInfoToWide<VI16MF4, VI32MF2>;
  def : VTypeInfoToWide<VI16MF2, VI32M1>;
  def : VTypeInfoToWide<VI16M1,  VI32M2>;
  def : VTypeInfoToWide<VI16M2,  VI32M4>;
  def : VTypeInfoToWide<VI16M4,  VI32M8>;

  def : VTypeInfoToWide<VI32MF2, VI64M1>;
  def : VTypeInfoToWide<VI32M1,  VI64M2>;
  def : VTypeInfoToWide<VI32M2,  VI64M4>;
  def : VTypeInfoToWide<VI32M4,  VI64M8>;
}
315
// Float types that can be widened to 2*SEW at 2*LMUL.
defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
  def : VTypeInfoToWide<VF16MF4, VF32MF2>;
  def : VTypeInfoToWide<VF16MF2, VF32M1>;
  def : VTypeInfoToWide<VF16M1, VF32M2>;
  def : VTypeInfoToWide<VF16M2, VF32M4>;
  def : VTypeInfoToWide<VF16M4, VF32M8>;

  def : VTypeInfoToWide<VF32MF2, VF64M1>;
  def : VTypeInfoToWide<VF32M1, VF64M2>;
  def : VTypeInfoToWide<VF32M2, VF64M4>;
  def : VTypeInfoToWide<VF32M4, VF64M8>;
}
328
// Integer types paired with the type at SEW/2, LMUL/2 (narrowing by 2).
defset list<VTypeInfoToFraction> AllFractionableVF2IntVectors = {
  def : VTypeInfoToFraction<VI16MF4, VI8MF8>;
  def : VTypeInfoToFraction<VI16MF2, VI8MF4>;
  def : VTypeInfoToFraction<VI16M1, VI8MF2>;
  def : VTypeInfoToFraction<VI16M2, VI8M1>;
  def : VTypeInfoToFraction<VI16M4, VI8M2>;
  def : VTypeInfoToFraction<VI16M8, VI8M4>;
  def : VTypeInfoToFraction<VI32MF2, VI16MF4>;
  def : VTypeInfoToFraction<VI32M1, VI16MF2>;
  def : VTypeInfoToFraction<VI32M2, VI16M1>;
  def : VTypeInfoToFraction<VI32M4, VI16M2>;
  def : VTypeInfoToFraction<VI32M8, VI16M4>;
  def : VTypeInfoToFraction<VI64M1, VI32MF2>;
  def : VTypeInfoToFraction<VI64M2, VI32M1>;
  def : VTypeInfoToFraction<VI64M4, VI32M2>;
  def : VTypeInfoToFraction<VI64M8, VI32M4>;
}
346
// Integer types paired with the type at SEW/4, LMUL/4 (narrowing by 4).
defset list<VTypeInfoToFraction> AllFractionableVF4IntVectors = {
  def : VTypeInfoToFraction<VI32MF2, VI8MF8>;
  def : VTypeInfoToFraction<VI32M1, VI8MF4>;
  def : VTypeInfoToFraction<VI32M2, VI8MF2>;
  def : VTypeInfoToFraction<VI32M4, VI8M1>;
  def : VTypeInfoToFraction<VI32M8, VI8M2>;
  def : VTypeInfoToFraction<VI64M1, VI16MF4>;
  def : VTypeInfoToFraction<VI64M2, VI16MF2>;
  def : VTypeInfoToFraction<VI64M4, VI16M1>;
  def : VTypeInfoToFraction<VI64M8, VI16M2>;
}
358
// Integer types paired with the type at SEW/8, LMUL/8 (narrowing by 8).
defset list<VTypeInfoToFraction> AllFractionableVF8IntVectors = {
  def : VTypeInfoToFraction<VI64M1, VI8MF8>;
  def : VTypeInfoToFraction<VI64M2, VI8MF4>;
  def : VTypeInfoToFraction<VI64M4, VI8MF2>;
  def : VTypeInfoToFraction<VI64M8, VI8M1>;
}
365
// Integer types paired with the float type at 2*SEW, 2*LMUL (widening
// int-to-float conversions).
defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
  def : VTypeInfoToWide<VI8MF8, VF16MF4>;
  def : VTypeInfoToWide<VI8MF4, VF16MF2>;
  def : VTypeInfoToWide<VI8MF2, VF16M1>;
  def : VTypeInfoToWide<VI8M1, VF16M2>;
  def : VTypeInfoToWide<VI8M2, VF16M4>;
  def : VTypeInfoToWide<VI8M4, VF16M8>;

  def : VTypeInfoToWide<VI16MF4, VF32MF2>;
  def : VTypeInfoToWide<VI16MF2, VF32M1>;
  def : VTypeInfoToWide<VI16M1, VF32M2>;
  def : VTypeInfoToWide<VI16M2, VF32M4>;
  def : VTypeInfoToWide<VI16M4, VF32M8>;

  def : VTypeInfoToWide<VI32MF2, VF64M1>;
  def : VTypeInfoToWide<VI32M1, VF64M2>;
  def : VTypeInfoToWide<VI32M2, VF64M4>;
  def : VTypeInfoToWide<VI32M4, VF64M8>;
}
385
// This class holds the record of the RISCVVPseudoTable below.
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
// RISCVBaseInfo.h.
class CONST8b<bits<8> val> {
  bits<8> V = val;
}
// Sentinel operand index meaning "not present".
def InvalidIndex : CONST8b<0x80>;
// Mixin for all vector pseudos; records the pseudo itself (as the table key)
// and the real instruction it lowers to.
class RISCVVPseudo {
  Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
  Instruction BaseInstr;
}

// The actual table, emitted via the SearchableTables backend and queried
// from C++ through getPseudoInfo.
def RISCVVPseudosTable : GenericTable {
  let FilterClass = "RISCVVPseudo";
  let CppTypeName = "PseudoInfo";
  let Fields = [ "Pseudo", "BaseInstr" ];
  let PrimaryKey = [ "Pseudo" ];
  let PrimaryKeyName = "getPseudoInfo";
}
407
// Searchable table mapping each vector intrinsic to its metadata
// (which operand, if any, needs sign extension); queried from C++ through
// getRISCVVIntrinsicInfo.
def RISCVVIntrinsicsTable : GenericTable {
  let FilterClass = "RISCVVIntrinsic";
  let CppTypeName = "RISCVVIntrinsicInfo";
  let Fields = ["IntrinsicID", "ExtendOperand"];
  let PrimaryKey = ["IntrinsicID"];
  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
415
// Record tying a segment load/store intrinsic plus (SEW, LMUL, index LMUL)
// to the pseudo implementing it; IndexLMUL defaults to M1 for non-indexed
// variants.
class RISCVZvlsseg<string IntrName, bits<11> S, bits<3> L, bits<3> IL = V_M1.value> {
  Intrinsic IntrinsicID = !cast<Intrinsic>(IntrName);
  bits<11> SEW = S;
  bits<3> LMUL = L;
  bits<3> IndexLMUL = IL;
  Pseudo Pseudo = !cast<Pseudo>(NAME);
}

// Searchable table for the records above, keyed on all four fields.
def RISCVZvlssegTable : GenericTable {
  let FilterClass = "RISCVZvlsseg";
  let Fields = ["IntrinsicID", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
  let PrimaryKey = ["IntrinsicID", "SEW", "LMUL", "IndexLMUL"];
  let PrimaryKeyName = "getPseudo";
}
430
431//===----------------------------------------------------------------------===//
432// Helpers to define the different pseudo instructions.
433//===----------------------------------------------------------------------===//
434
// Derives the real instruction name from a pseudo name by stripping the
// "Pseudo" prefix and the LMUL (_M*/_MF*), mask-type (_B*), and _MASK
// suffixes, and collapsing typed FP suffixes (F16/F32/F64) to "F".
// Substitutions apply innermost-first, so longer suffixes (e.g. _B16) are
// removed before their prefixes (e.g. _B1) are considered.
class PseudoToVInst<string PseudoInst> {
  string VInst = !subst("_M8", "",
                 !subst("_M4", "",
                 !subst("_M2", "",
                 !subst("_M1", "",
                 !subst("_MF2", "",
                 !subst("_MF4", "",
                 !subst("_MF8", "",
                 !subst("_B1", "",
                 !subst("_B2", "",
                 !subst("_B4", "",
                 !subst("_B8", "",
                 !subst("_B16", "",
                 !subst("_B32", "",
                 !subst("_B64", "",
                 !subst("_MASK", "",
                 !subst("F16", "F",
                 !subst("F32", "F",
                 !subst("F64", "F",
                 !subst("Pseudo", "", PseudoInst)))))))))))))))))));
}
456
// Lowercases only the segment load/store mnemonics and the "FF" (fault-only-
// first) marker; it is not a general lowercasing utility. Used when mapping
// pseudo names to intrinsic names below.
class ToLowerCase<string Upper> {
  string L = !subst("FF", "ff",
             !subst("VLSEG", "vlseg",
             !subst("VLSSEG", "vlsseg",
             !subst("VSSEG", "vsseg",
             !subst("VSSSEG", "vssseg",
             !subst("VLOXSEG", "vloxseg",
             !subst("VLUXSEG", "vluxseg",
             !subst("VSOXSEG", "vsoxseg",
             !subst("VSUXSEG", "vsuxseg", Upper)))))))));
}
468
// Derives the intrinsic name for a segment load/store pseudo: strips LMUL
// suffixes (via PseudoToVInst), the element-width (E*/EI*) and "_V" parts,
// lowercases the mnemonic, and appends "_mask" for masked variants.
// Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
// Example: PseudoVLSEG2E32_V_M2_MASK -> int_riscv_vlseg2_mask
class PseudoToIntrinsic<string PseudoInst, bit IsMasked> {
  string Intrinsic = !strconcat("int_riscv_",
                        ToLowerCase<
                        !subst("E8", "",
                        !subst("E16", "",
                        !subst("E32", "",
                        !subst("E64", "",
                        !subst("EI8", "",
                        !subst("EI16", "",
                        !subst("EI32", "",
                        !subst("EI64", "",
                        !subst("_V", "", PseudoToVInst<PseudoInst>.VInst)))))))))>.L,
                        !if(IsMasked, "_mask", ""));
}
485
// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
// of a reduction.
//
// Maps a register class to its V0-excluding counterpart; classes without one
// (e.g. segment tuple classes) are returned unchanged.
class GetVRegNoV0<VReg VRegClass> {
  VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
                 !eq(VRegClass, VRM2) : VRM2NoV0,
                 !eq(VRegClass, VRM4) : VRM4NoV0,
                 !eq(VRegClass, VRM8) : VRM8NoV0,
                 true : VRegClass);
}
497
// Join strings in list using separator and ignoring empty elements.
// NOTE(review): !head/!tail require a non-empty list — callers pass at least
// one element (possibly "").
class Join<list<string> strings, string separator> {
  string ret = !foldl(!head(strings), !tail(strings), a, b,
                      !cond(
                        !and(!empty(a), !empty(b)) : "",
                        !empty(a) : b,
                        !empty(b) : a,
                        true : a#separator#b));
}
507
// Generic vector pseudo: records the base instruction and the LMUL it
// operates at.
class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
      Pseudo<outs, ins, []>, RISCVVPseudo {
  let BaseInstr = instr;
  let VLMul = m.value;
}
513
// Unmasked unit-stride load: $rd = load($rs1), with explicit VL and SEW
// operands; reads the VL/VTYPE CSRs.
class VPseudoUSLoadNoMask<VReg RetClass>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
528
// Masked unit-stride load: $merge supplies inactive-element values and is
// tied to $rd; $rd cannot be V0 (mask register), hence GetVRegNoV0.
class VPseudoUSLoadMask<VReg RetClass>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1,
                   VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
546
// Unmasked strided load: base address in $rs1, byte stride in $rs2.
class VPseudoSLoadNoMask<VReg RetClass>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
561
// Masked strided load: merge operand tied to the destination; destination
// may not overlap V0.
class VPseudoSLoadMask<VReg RetClass>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1, GPR:$rs2,
                   VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
579
// Unmasked indexed load: base address in $rs1, index vector in $rs2
// (IdxClass may differ from RetClass when index EEW differs from data EEW).
class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
594
// Masked indexed load: merge operand tied to the destination; destination
// may not overlap V0.
class VPseudoILoadMask<VReg RetClass, VReg IdxClass>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1, IdxClass:$rs2,
                   VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
612
// Unmasked unit-stride store: value to store in $rd, base address in $rs1.
class VPseudoUSStoreNoMask<VReg StClass>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
627
// Masked unit-stride store: mask in $vm; stores have no merge operand.
class VPseudoUSStoreMask<VReg StClass>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
641
// Unmasked strided store: base address in $rs1, byte stride in $rs2.
class VPseudoSStoreNoMask<VReg StClass>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, GPR:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
656
// Masked strided store: mask in $vm.
class VPseudoSStoreMask<VReg StClass>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, GPR:$rs2, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
670
// Unary instruction that is never masked so HasDummyMask=0.
// Op2Class is a DAGOperand so the source may be a vector register, scalar
// register, or immediate.
class VPseudoUnaryNoDummyMask<VReg RetClass,
                              DAGOperand Op2Class> :
        Pseudo<(outs RetClass:$rd),
               (ins Op2Class:$rs1, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
686
// Unmasked nullary operation: produces a vector result from only VL and SEW.
class VPseudoNullaryNoMask<VReg RegClass>:
      Pseudo<(outs RegClass:$rd),
             (ins GPR:$vl, ixlenimm:$sew),
             []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
701
// Masked nullary operation: merge operand tied to the destination;
// destination may not overlap V0.
class VPseudoNullaryMask<VReg RegClass>:
      Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
             (ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, GPR:$vl,
              ixlenimm:$sew), []>, RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints ="$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
717
// Nullary for pseudo instructions. They are expanded in
// RISCVExpandPseudoInsts pass.
class VPseudoNullaryPseudoM<string BaseInst>
       : Pseudo<(outs VR:$rd), (ins GPR:$vl, ixlenimm:$sew), []>,
       RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  // BaseInstr is not used in RISCVExpandPseudoInsts pass.
  // Just fill a corresponding real v-inst to pass tablegen check.
  let BaseInstr = !cast<Instruction>(BaseInst);
}
734
// Unmasked unary operation. RetClass could be GPR or VReg (hence DAGOperand).
class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs RetClass:$rd),
               (ins OpClass:$rs2, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Constraint;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
751
// Masked unary operation: any extra Constraint (e.g. @earlyclobber) is
// joined with the implicit "$rd = $merge" tie.
class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
                    VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
768
// Masked mask-unary operation with a scalar (GPR) result and no maskedoff
// operand (e.g. population-count style operations on masks).
class VPseudoMaskUnarySOutMask:
        Pseudo<(outs GPR:$rd),
               (ins VR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
783
// Masked unary operation producing a mask result. Despite the original
// comment, this class does tie $rd to $merge; the destination uses plain VR
// (mask results may overlap V0).
class VPseudoUnaryMOutMask:
        Pseudo<(outs VR:$rd),
               (ins VR:$merge, VR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
800
// Unary operation whose mask can be any register V0~V31 (plain VR operand
// instead of VMaskOp). The destination is earlyclobber and tied to $merge.
class VPseudoUnaryAnyMask<VReg RetClass,
                          VReg Op1Class> :
      Pseudo<(outs RetClass:$rd),
             (ins RetClass:$merge,
                  Op1Class:$rs2,
                  VR:$vm, GPR:$vl, ixlenimm:$sew),
             []>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
821
// Unmasked binary operation: $rs2 is the vector operand, $rs1 may be a
// vector, scalar, or immediate (DAGOperand).
class VPseudoBinaryNoMask<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins Op1Class:$rs2, Op2Class:$rs1, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Constraint;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
840
// Unmasked indexed store pseudo: stores $rd through base $rs1 with index
// vector $rs2.
class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
855
// Masked indexed store pseudo.  Stores have no destination, so there is no
// merge operand or tie constraint.
class VPseudoIStoreMask<VReg StClass, VReg IdxClass>:
      Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
869
// Masked binary operation pseudo.  The destination/merge register class
// excludes V0 because V0 is reserved for the mask; the result is tied to
// $merge, composed with any extra Constraint from the instantiation.
class VPseudoBinaryMask<VReg RetClass,
                        VReg Op1Class,
                        DAGOperand Op2Class,
                        string Constraint> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
890
// Binary operation with an optional carry/borrow input in V0 (e.g. the
// vadc/vmadc family).  When CarryIn is set, a VMV0 operand ($carry) is added
// to the ins list; there is never a merge operand.  VLMul is set here (not
// by the enclosing multiclass) because instantiations pass MInfo directly.
class VPseudoBinaryCarryIn<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
                           LMULInfo MInfo,
                           bit CarryIn,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               !if(CarryIn,
                  (ins Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, GPR:$vl,
                       ixlenimm:$sew),
                  (ins Op1Class:$rs2, Op2Class:$rs1, GPR:$vl, ixlenimm:$sew)), []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Constraint;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 0;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
  let VLMul = MInfo.value;
}
915
// Unmasked ternary operation pseudo (e.g. multiply-add forms): the result is
// tied to the accumulator operand $rs3.
class VPseudoTernaryNoMask<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
                           string Constraint> :
        Pseudo<(outs RetClass:$rd),
               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
                    GPR:$vl, ixlenimm:$sew),
               []>,
        RISCVVPseudo {
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
937
// Unmasked vector AMO pseudo in write-destination ("WD") form: the original
// memory values are written back into $vd_wd, which is tied to $vd.
class VPseudoAMOWDNoMask<VReg RetClass,
                         VReg Op1Class> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
               (ins GPR:$rs1,
                    Op1Class:$vs2,
                    GetVRegNoV0<RetClass>.R:$vd,
                    GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  // An AMO both reads and modifies memory.
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
  let usesCustomInserter = 1;
  let Constraints = "$vd_wd = $vd";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
957
// Masked vector AMO pseudo in write-destination ("WD") form; same operand
// layout as the unmasked variant plus the V0 mask operand.
class VPseudoAMOWDMask<VReg RetClass,
                       VReg Op1Class> :
        Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
               (ins GPR:$rs1,
                    Op1Class:$vs2,
                    GetVRegNoV0<RetClass>.R:$vd,
                    VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
        RISCVVPseudo {
  // An AMO both reads and modifies memory.
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
  let usesCustomInserter = 1;
  let Constraints = "$vd_wd = $vd";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
976
// Vector AMO pseudos for one index EEW.  For each legal (SEW, LMUL) pair it
// computes the index EMUL and emits unmasked and masked "_WD" pseudos.
multiclass VPseudoAMOEI<int eew> {
  // Standard scalar AMO supports 32, 64, and 128 Mem data bits,
  // and in the base vector "V" extension, only SEW up to ELEN = max(XLEN, FLEN)
  // are required to be supported.
  // therefore only [32, 64] is allowed here.
  foreach sew = [32, 64] in {
    foreach lmul = MxSet<sew>.m in {
      defvar octuple_lmul = octuple_from_str<lmul.MX>.ret;
      // Calculate emul = eew * lmul / sew
      defvar octuple_emul = !srl(!mul(eew, octuple_lmul), shift_amount<sew>.val);
      // Octuple values 1..64 correspond to EMUL in [1/8, 8].
      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
        defvar emulMX = octuple_to_str<octuple_emul>.ret;
        defvar lmulMX = octuple_to_str<octuple_lmul>.ret;
        defvar emul= !cast<LMULInfo>("V_" # emulMX);
        // NOTE(review): this defvar shadows the enclosing foreach variable
        // 'lmul' from this point on — intentional but easy to misread;
        // consider renaming.
        defvar lmul = !cast<LMULInfo>("V_" # lmulMX);
        let VLMul = lmul.value in {
          def "_WD_" # lmulMX # "_" # emulMX : VPseudoAMOWDNoMask<lmul.vrclass, emul.vrclass>;
          def "_WD_" # lmulMX # "_" # emulMX # "_MASK" : VPseudoAMOWDMask<lmul.vrclass, emul.vrclass>;
        }
      }
    }
  }
}
1000
// Instantiates VPseudoAMOEI once per supported index EEW (suffix "EI<eew>").
multiclass VPseudoAMO {
  foreach eew = EEWList in
    defm "EI" # eew : VPseudoAMOEI<eew>;
}
1005
// Unmasked unit-stride segment load pseudo.
class VPseudoUSSegLoadNoMask<VReg RetClass, bits<11> EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1021
// Masked unit-stride segment load pseudo; result tied to $merge, V0 excluded
// from the destination class since it holds the mask.
class VPseudoUSSegLoadMask<VReg RetClass, bits<11> EEW>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1039
// Unmasked strided segment load pseudo ($offset is the byte stride register).
class VPseudoSSegLoadNoMask<VReg RetClass, bits<11> EEW>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, GPR:$offset, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1055
// Masked strided segment load pseudo; result tied to $merge.
class VPseudoSSegLoadMask<VReg RetClass, bits<11> EEW>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  GPR:$offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Constraints = "$rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1073
// Unmasked indexed segment load pseudo ($offset is the index vector).
class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, bits<11> EEW, bits<3> LMUL>:
      Pseudo<(outs RetClass:$rd),
             (ins GPR:$rs1, IdxClass:$offset, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1092
// Masked indexed segment load pseudo; earlyclobber destination, tied to
// $merge.
class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, bits<11> EEW, bits<3> LMUL>:
      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                  IdxClass:$offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul, LMUL> {
  let mayLoad = 1;
  let mayStore = 0;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  // For vector indexed segment loads, the destination vector register groups
  // cannot overlap the source vector register group
  let Constraints = "@earlyclobber $rd, $rd = $merge";
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasMergeOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1112
// Unmasked unit-stride segment store pseudo ($rd holds the data to store).
class VPseudoUSSegStoreNoMask<VReg ValClass, bits<11> EEW>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1128
// Masked unit-stride segment store pseudo; stores need no merge operand.
class VPseudoUSSegStoreMask<VReg ValClass, bits<11> EEW>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1,
                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1144
// Unmasked strided segment store pseudo ($offset is the byte stride).
class VPseudoSSegStoreNoMask<VReg ValClass, bits<11> EEW>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, GPR: $offset, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1160
// Masked strided segment store pseudo.
class VPseudoSSegStoreMask<VReg ValClass, bits<11> EEW>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, GPR: $offset,
                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1176
// Unmasked indexed segment store pseudo ($index is the index vector).
class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<11> EEW, bits<3> LMUL>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
                  GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let HasDummyMask = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1193
// Masked indexed segment store pseudo.
class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, bits<11> EEW, bits<3> LMUL>:
      Pseudo<(outs),
             (ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
      RISCVVPseudo,
      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul, LMUL> {
  let mayLoad = 0;
  let mayStore = 1;
  let hasSideEffects = 0;
  let usesCustomInserter = 1;
  let Uses = [VL, VTYPE];
  let HasVLOp = 1;
  let HasSEWOp = 1;
  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
1209
// Unit-stride load pseudos: one unmasked and one masked def per LMUL.
multiclass VPseudoUSLoad {
  foreach m = MxList.m in {
    defvar mx = m.MX;
    defvar reg = m.vrclass;
    let VLMul = m.value in {
      def "_V_" # mx : VPseudoUSLoadNoMask<reg>;
      def "_V_" # mx # "_MASK" : VPseudoUSLoadMask<reg>;
    }
  }
}
1220
// Mask-register loads (vle1-style): one def per mask type, reusing the
// unit-stride unmasked load pseudo with the full VR class.
multiclass VPseudoLoadMask {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR>;
    }
  }
}
1228
// Strided load pseudos: one unmasked and one masked def per LMUL.
multiclass VPseudoSLoad {
  foreach m = MxList.m in {
    defvar mx = m.MX;
    defvar reg = m.vrclass;
    let VLMul = m.value in {
      def "_V_" # mx : VPseudoSLoadNoMask<reg>;
      def "_V_" # mx # "_MASK" : VPseudoSLoadMask<reg>;
    }
  }
}
1239
// Indexed load pseudos: one unmasked/masked pair for every combination of
// data LMUL (outer loop) and index LMUL (inner loop).
multiclass VPseudoILoad {
  foreach data = MxList.m in
  foreach index = MxList.m in {
    defvar DataMX = data.MX;
    defvar DataReg = data.vrclass;
    defvar IdxMX = index.MX;
    defvar IdxReg = index.vrclass;
    let VLMul = data.value in {
      def "_V_" # IdxMX # "_" # DataMX : VPseudoILoadNoMask<DataReg, IdxReg>;
      def "_V_" # IdxMX # "_" # DataMX # "_MASK" : VPseudoILoadMask<DataReg, IdxReg>;
    }
  }
}
1253
// Unit-stride store pseudos: one unmasked and one masked def per LMUL.
multiclass VPseudoUSStore {
  foreach m = MxList.m in {
    defvar mx = m.MX;
    defvar reg = m.vrclass;
    let VLMul = m.value in {
      def "_V_" # mx : VPseudoUSStoreNoMask<reg>;
      def "_V_" # mx # "_MASK" : VPseudoUSStoreMask<reg>;
    }
  }
}
1264
// Mask-register stores (vse1-style): one def per mask type, reusing the
// unit-stride unmasked store pseudo with the full VR class.
multiclass VPseudoStoreMask {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR>;
    }
  }
}
1272
// Strided store pseudos: one unmasked and one masked def per LMUL.
multiclass VPseudoSStore {
  foreach lmul = MxList.m in {
    defvar LInfo = lmul.MX;
    defvar vreg = lmul.vrclass;
    let VLMul = lmul.value in {
      def "_V_" # LInfo : VPseudoSStoreNoMask<vreg>;
      def "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg>;
    }
  }
}
1283
// Indexed store pseudos: one unmasked/masked pair for every combination of
// data LMUL (outer loop) and index LMUL (inner loop).
multiclass VPseudoIStore {
  foreach lmul = MxList.m in
  foreach idx_lmul = MxList.m in {
    defvar LInfo = lmul.MX;
    defvar Vreg = lmul.vrclass;
    defvar IdxLInfo = idx_lmul.MX;
    defvar IdxVreg = idx_lmul.vrclass;
    let VLMul = lmul.value in {
      def "_V_" # IdxLInfo # "_" # LInfo : VPseudoIStoreNoMask<Vreg, IdxVreg>;
      def "_V_" # IdxLInfo # "_" # LInfo # "_MASK" : VPseudoIStoreMask<Vreg, IdxVreg>;
    }
  }
}
1297
// Unary ops taking a mask register and producing a scalar (GPR) result:
// one unmasked and one masked def per mask type.
multiclass VPseudoUnaryS_M {
  foreach mti = AllMasks in
  {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<GPR, VR>;
      def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask;
    }
  }
}
1307
// Mask-to-mask unary ops; destination is earlyclobber so it cannot alias the
// source mask register.
multiclass VPseudoUnaryM_M {
  defvar constraint = "@earlyclobber $rd";
  foreach mti = AllMasks in
  {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR, constraint>;
      def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>;
    }
  }
}
1318
// Nullary (no vector source) ops with a vector destination: one unmasked and
// one masked def per LMUL.
multiclass VPseudoMaskNullaryV {
  foreach m = MxList.m in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>;
      def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>;
    }
  }
}
1327
// Nullary mask-result pseudos, one per mask type.  The body instantiates the
// identically-named *class* VPseudoNullaryPseudoM (defined above, outside
// this excerpt) — TableGen keeps class and multiclass names in separate
// namespaces, so this is not recursion.
multiclass VPseudoNullaryPseudoM <string BaseInst> {
  foreach mti = AllMasks in {
    let VLMul = mti.LMul.value in {
      def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">;
    }
  }
}
1335
// Unary ops taking a mask (VR) source and producing a vector result;
// earlyclobber keeps the wider destination from aliasing the mask source.
multiclass VPseudoUnaryV_M {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList.m in {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>;
      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>;
    }
  }
}
1345
// Unary vector ops whose mask may live in any vector register (see
// VPseudoUnaryAnyMask): one "_VM" def per LMUL.
multiclass VPseudoUnaryV_V_AnyMask {
  foreach m = MxList.m in {
    let VLMul = m.value in
      def _VM # "_" # m.MX : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>;
  }
}
1352
// Core binary-op helper: emits the unmasked pseudo and its "_MASK" variant
// for a single LMUL, forwarding any register constraint.
multiclass VPseudoBinary<VReg RetClass,
                         VReg Op1Class,
                         DAGOperand Op2Class,
                         LMULInfo MInfo,
                         string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                             Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class,
                                                     Constraint>;
  }
}
1365
// Like VPseudoBinary, but for ops whose second source uses a different
// effective LMUL (emul) than the destination/first source (lmul); both LMULs
// appear in the record name.
multiclass VPseudoBinaryEmul<VReg RetClass,
                             VReg Op1Class,
                             DAGOperand Op2Class,
                             LMULInfo lmul,
                             LMULInfo emul,
                             string Constraint = ""> {
  let VLMul = lmul.value in {
    def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                                            Constraint>;
    def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class,
                                                                    Constraint>;
  }
}
1379
// Vector-vector binary ops, one VPseudoBinary instantiation per LMUL.
multiclass VPseudoBinaryV_VV<string Constraint = ""> {
  foreach m = MxList.m in
    defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
}
1384
// Vector-vector binary ops where the second source has a fixed EEW (e.g.
// index operands): for each (LMUL, SEW) pair compute the source EMUL and
// emit a def only when it is representable.
multiclass VPseudoBinaryV_VV_EEW<int eew, string Constraint = ""> {
  foreach m = MxList.m in {
    foreach sew = EEWList in {
      defvar octuple_lmul = octuple_from_str<m.MX>.ret;
      // emul = lmul * eew / sew
      defvar octuple_emul = !srl(!mul(octuple_lmul, eew), shift_amount<sew>.val);
      // Octuple values 1..64 correspond to EMUL in [1/8, 8].
      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
        defvar emulMX = octuple_to_str<octuple_emul>.ret;
        defvar emul = !cast<LMULInfo>("V_" # emulMX);
        defm _VV : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul, Constraint>;
      }
    }
  }
}
1399
// Vector-scalar (GPR) binary ops, one instantiation per LMUL.
multiclass VPseudoBinaryV_VX<string Constraint = ""> {
  foreach m = MxList.m in
    defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}

// Vector-scalar (FPR) binary ops, one instantiation per (LMUL, FP class).
multiclass VPseudoBinaryV_VF<string Constraint = ""> {
  foreach m = MxList.m in
    foreach f = FPList.fpinfo in
      defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
                                       f.fprclass, m, Constraint>;
}

// Vector-immediate binary ops, one instantiation per LMUL.
multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList.m in
    defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}

// Mask-mask logical ops (unmasked only), one def per LMUL.
multiclass VPseudoBinaryM_MM {
  foreach m = MxList.m in
    let VLMul = m.value in {
      def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "">;
    }
}
1423
// These widening ("W_") and narrowing ("V_W") pseudos use @earlyclobber
// because the architectural overlap rules are only partially permissive:
// * If the destination EEW is smaller than the source EEW, an overlap is
//   legal only in the lowest-numbered part of the source register group;
//   otherwise it is illegal.
// * If the destination EEW is greater than the source EEW, an overlap is
//   legal only when the source EMUL is at least 1 and the overlap is in the
//   highest-numbered part of the destination register group; otherwise it
//   is illegal.
// Rather than model the legal partial overlaps, we conservatively forbid
// all overlap.  LMUL8 is excluded (m[0-5]) since the widened result would
// need a register group wider than LMUL8.
multiclass VPseudoBinaryW_VV {
  foreach m = MxList.m[0-5] in
    defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
                             "@earlyclobber $rd">;
}

// Widening vector-scalar (GPR) ops.
multiclass VPseudoBinaryW_VX {
  foreach m = MxList.m[0-5] in
    defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
                               "@earlyclobber $rd">;
}

// Widening vector-scalar (FPR) ops; only the two narrower FP classes apply.
multiclass VPseudoBinaryW_VF {
  foreach m = MxList.m[0-5] in
    foreach f = FPList.fpinfo[0-1] in
      defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
                                       f.fprclass, m,
                                       "@earlyclobber $rd">;
}

// Widening ops with a wide first source ("W" form), vector second source.
multiclass VPseudoBinaryW_WV {
  foreach m = MxList.m[0-5] in
    defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
                             "@earlyclobber $rd">;
}

// Widening ops with a wide first source and a scalar (GPR) second source.
multiclass VPseudoBinaryW_WX {
  foreach m = MxList.m[0-5] in
    defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m,
                               "@earlyclobber $rd">;
}

// Widening ops with a wide first source and a scalar (FPR) second source.
multiclass VPseudoBinaryW_WF {
  foreach m = MxList.m[0-5] in
    foreach f = FPList.fpinfo[0-1] in
      defm "_W" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
                                       f.fprclass, m,
                                       "@earlyclobber $rd">;
}

// Narrowing ops: wide first source, normal-width result and second source.
multiclass VPseudoBinaryV_WV {
  foreach m = MxList.m[0-5] in
    defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
                             "@earlyclobber $rd">;
}

// Narrowing ops with a scalar (GPR) second source.
multiclass VPseudoBinaryV_WX {
  foreach m = MxList.m[0-5] in
    defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
                             "@earlyclobber $rd">;
}

// Narrowing ops with an unsigned-immediate second source.
multiclass VPseudoBinaryV_WI {
  foreach m = MxList.m[0-5] in
    defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
                             "@earlyclobber $rd">;
}
1488
// For vadc and vsbc, the instruction encoding is reserved if the destination
// vector register is v0.
// For vadc and vsbc, CarryIn == 1 and CarryOut == 0
// Vector-vector carry-in/out ops.  The destination class is:
//   VR                       when a carry/mask is produced (CarryOut),
//   the no-V0 class          when only a carry is consumed (CarryIn),
//   the plain vrclass        otherwise.
multiclass VPseudoBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList.m in
    def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, m.vrclass, m, CarryIn, Constraint>;
}

// Vector-scalar (GPR) carry-in/out ops; same destination-class selection.
multiclass VPseudoBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList.m in
    def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, GPR, m, CarryIn, Constraint>;
}

// Vector-scalar (FPR) ops with carry-in only (vfmerge-style).
multiclass VPseudoBinaryV_FM {
  foreach m = MxList.m in
    foreach f = FPList.fpinfo in
      def "_V" # f.FX # "M_" # m.MX :
        VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
                             m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">;
}

// Vector-immediate carry-in/out ops; same destination-class selection.
multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
                             string Constraint = ""> {
  foreach m = MxList.m in
    def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
      VPseudoBinaryCarryIn<!if(CarryOut, VR,
                           !if(!and(CarryIn, !not(CarryOut)),
                               GetVRegNoV0<m.vrclass>.R, m.vrclass)),
                           m.vrclass, simm5, m, CarryIn, Constraint>;
}
1529
// Unary moves with vector, scalar (GPR), and immediate source forms, using
// the no-dummy-mask pseudo class (e.g. the vmv.v.* family).
multiclass VPseudoUnaryV_V_X_I_NoDummyMask {
  foreach m = MxList.m in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, m.vrclass>;
      def "_X_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, GPR>;
      def "_I_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, simm5>;
    }
  }
}

// FP-scalar source variant of the above (e.g. vfmv.v.f), one def per
// (LMUL, FP register class).
multiclass VPseudoUnaryV_F_NoDummyMask {
  foreach m = MxList.m in {
    foreach f = FPList.fpinfo in {
      let VLMul = m.value in {
        def "_" # f.FX # "_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, f.fprclass>;
      }
    }
  }
}

// Same-width vector-to-vector unary ops: unmasked and masked def per LMUL.
multiclass VPseudoUnaryV_V {
  foreach m = MxList.m in {
    let VLMul = m.value in {
      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>;
    }
  }
}
1558
// Unary ops whose source uses the half-width register class (f2vrclass);
// the LMUL slice starts at m[1] so the fractional source class exists.
// NOTE(review): these three lack the usual "V" prefix of the sibling
// VPseudo* multiclasses; renaming would touch every user, so left as-is.
multiclass PseudoUnaryV_VF2 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxList.m[1-6] in
  {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>;
      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f2vrclass,
                                                  constraints>;
    }
  }
}

// As above with a quarter-width source class (f4vrclass), from m[2].
multiclass PseudoUnaryV_VF4 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxList.m[2-6] in
  {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>;
      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f4vrclass,
                                                  constraints>;
    }
  }
}

// As above with an eighth-width source class (f8vrclass), from m[3].
multiclass PseudoUnaryV_VF8 {
  defvar constraints = "@earlyclobber $rd";
  foreach m = MxList.m[3-6] in
  {
    let VLMul = m.value in {
      def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>;
      def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.f8vrclass,
                                                  constraints>;
    }
  }
}
1594
// The destination EEW is 1.
// The source EEW is 8, 16, 32, or 64.
// When the destination EEW is different from source EEW, we need to use
// @earlyclobber to avoid the overlap between destination and source registers.
// Comparison ops producing a mask result, vector-vector form.
multiclass VPseudoBinaryM_VV {
  foreach m = MxList.m in
    defm _VV : VPseudoBinary<VR, m.vrclass, m.vrclass, m, "@earlyclobber $rd">;
}

// Mask-result comparison, vector-scalar (GPR) form.
multiclass VPseudoBinaryM_VX {
  foreach m = MxList.m in
    defm "_VX" :
      VPseudoBinary<VR, m.vrclass, GPR, m, "@earlyclobber $rd">;
}

// Mask-result comparison, vector-scalar (FPR) form.
multiclass VPseudoBinaryM_VF {
  foreach m = MxList.m in
    foreach f = FPList.fpinfo in
      defm "_V" # f.FX :
        VPseudoBinary<VR, m.vrclass, f.fprclass, m, "@earlyclobber $rd">;
}

// Mask-result comparison, vector-immediate form.
multiclass VPseudoBinaryM_VI {
  foreach m = MxList.m in
    defm _VI : VPseudoBinary<VR, m.vrclass, simm5, m, "@earlyclobber $rd">;
}
1621
// The multiclasses below bundle the per-operand-kind multiclasses for
// instructions that come in several source-operand forms.

// Vector / scalar / immediate forms.
multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  defm "" : VPseudoBinaryV_VV<Constraint>;
  defm "" : VPseudoBinaryV_VX<Constraint>;
  defm "" : VPseudoBinaryV_VI<ImmType, Constraint>;
}

// Vector / scalar (GPR) forms.
multiclass VPseudoBinaryV_VV_VX {
  defm "" : VPseudoBinaryV_VV;
  defm "" : VPseudoBinaryV_VX;
}

// Vector / scalar (FPR) forms.
multiclass VPseudoBinaryV_VV_VF {
  defm "" : VPseudoBinaryV_VV;
  defm "" : VPseudoBinaryV_VF;
}

// Scalar / immediate forms (no vector-vector variant).
multiclass VPseudoBinaryV_VX_VI<Operand ImmType = simm5> {
  defm "" : VPseudoBinaryV_VX;
  defm "" : VPseudoBinaryV_VI<ImmType>;
}

// Widening vector / scalar (GPR) forms.
multiclass VPseudoBinaryW_VV_VX {
  defm "" : VPseudoBinaryW_VV;
  defm "" : VPseudoBinaryW_VX;
}

// Widening vector / scalar (FPR) forms.
multiclass VPseudoBinaryW_VV_VF {
  defm "" : VPseudoBinaryW_VV;
  defm "" : VPseudoBinaryW_VF;
}

// Widening wide-source vector / scalar (GPR) forms.
multiclass VPseudoBinaryW_WV_WX {
  defm "" : VPseudoBinaryW_WV;
  defm "" : VPseudoBinaryW_WX;
}

// Widening wide-source vector / scalar (FPR) forms.
multiclass VPseudoBinaryW_WV_WF {
  defm "" : VPseudoBinaryW_WV;
  defm "" : VPseudoBinaryW_WF;
}
1662
// Carry-in forms (vadc-style): vector / scalar / immediate second sources.
multiclass VPseudoBinaryV_VM_XM_IM {
  defm "" : VPseudoBinaryV_VM;
  defm "" : VPseudoBinaryV_XM;
  defm "" : VPseudoBinaryV_IM;
}

// Carry-in forms, vector and scalar second sources only.
multiclass VPseudoBinaryV_VM_XM {
  defm "" : VPseudoBinaryV_VM;
  defm "" : VPseudoBinaryV_XM;
}

// Carry-in AND carry-out forms (vmadc-style, mask result).
multiclass VPseudoBinaryM_VM_XM_IM<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
}

// Carry-in and carry-out, vector and scalar second sources only.
multiclass VPseudoBinaryM_VM_XM<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
}

// Carry-out only (no carry consumed), all three second-source forms.
multiclass VPseudoBinaryM_V_X_I<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
}

// Carry-out only, vector and scalar second sources.
multiclass VPseudoBinaryM_V_X<string Constraint> {
  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
}

// Narrowing forms: wide-source vector / scalar / immediate.
multiclass VPseudoBinaryV_WV_WX_WI {
  defm "" : VPseudoBinaryV_WV;
  defm "" : VPseudoBinaryV_WX;
  defm "" : VPseudoBinaryV_WI;
}
1701
// Core ternary-op helper for a single LMUL: the unmasked def uses the
// ternary pseudo (result tied to the accumulator); the "_MASK" def reuses
// VPseudoBinaryMask since the masked form carries the accumulator as $merge.
multiclass VPseudoTernary<VReg RetClass,
                          VReg Op1Class,
                          RegisterClass Op2Class,
                          LMULInfo MInfo,
                          string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class, Constraint>;
  }
}
1712
// Ternary vector-vector pseudos, one per LMUL.
multiclass VPseudoTernaryV_VV<string Constraint = ""> {
  foreach m = MxList.m in
    defm _VV : VPseudoTernary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
}
1717
// Ternary pseudos with a GPR scalar as the SECOND source, one per LMUL.
multiclass VPseudoTernaryV_VX<string Constraint = ""> {
  foreach m = MxList.m in
    defm _VX : VPseudoTernary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}
1722
// Ternary pseudos with a GPR scalar as the FIRST source ("AAXA" operand
// order, used by multiply-accumulate style ops), one per LMUL.
multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> {
  foreach m = MxList.m in
    // Bare identifier prefix, consistent with VPseudoTernaryV_VX above
    // (was a quoted "_VX"; both spellings produce the same record names).
    defm _VX : VPseudoTernary<m.vrclass, GPR, m.vrclass, m, Constraint>;
}
1727
// Ternary pseudos with an FP scalar as the FIRST source ("AAXA" order), one
// per FP register type per LMUL.
multiclass VPseudoTernaryV_VF_AAXA<string Constraint = ""> {
  foreach m = MxList.m in
    foreach f = FPList.fpinfo in
      defm "_V" # f.FX : VPseudoTernary<m.vrclass, f.fprclass, m.vrclass,
                                        m, Constraint>;
}
1734
// Widening ternary VV pseudos.  The destination uses the double-width class
// (wvrclass) and is earlyclobber; only MxList.m[0-5] are instantiated
// (presumably the largest LMUL has no wide class -- confirm).
multiclass VPseudoTernaryW_VV {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList.m[0-5] in
    defm _VV : VPseudoTernary<m.wvrclass, m.vrclass, m.vrclass, m, constraint>;
}
1740
// Widening ternary pseudos with a GPR scalar source; double-width
// earlyclobber destination, restricted to MxList.m[0-5] like the VV form.
multiclass VPseudoTernaryW_VX {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList.m[0-5] in
    // Bare identifier prefix, consistent with VPseudoTernaryW_VV above
    // (was a quoted "_VX"; both spellings produce the same record names).
    defm _VX : VPseudoTernary<m.wvrclass, GPR, m.vrclass, m, constraint>;
}
1746
// Widening ternary pseudos with an FP scalar source.  Only the first two FP
// types (fpinfo[0-1]) are used -- presumably because the widened result of
// the largest FP SEW would not be legal; confirm.
multiclass VPseudoTernaryW_VF {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList.m[0-5] in
    foreach f = FPList.fpinfo[0-1] in
      defm "_V" # f.FX : VPseudoTernary<m.wvrclass, f.fprclass, m.vrclass, m,
                                        constraint>;
}
1754
// Ternary pseudos with an immediate (default simm5) second source, per LMUL.
multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
  foreach m = MxList.m in
    defm _VI : VPseudoTernary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
1759
// Bundles the VV and scalar-first (VX_AAXA) ternary variants.
multiclass VPseudoTernaryV_VV_VX_AAXA<string Constraint = ""> {
  defm "" : VPseudoTernaryV_VV<Constraint>;
  defm "" : VPseudoTernaryV_VX_AAXA<Constraint>;
}
1764
// Bundles the VV and FP-scalar-first (VF_AAXA) ternary variants.
multiclass VPseudoTernaryV_VV_VF_AAXA<string Constraint = ""> {
  defm "" : VPseudoTernaryV_VV<Constraint>;
  defm "" : VPseudoTernaryV_VF_AAXA<Constraint>;
}
1769
// Bundles the scalar (VX) and immediate (VI) ternary variants.
multiclass VPseudoTernaryV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
  defm "" : VPseudoTernaryV_VX<Constraint>;
  defm "" : VPseudoTernaryV_VI<ImmType, Constraint>;
}
1774
// Bundles the widening ternary VV and VX variants.
multiclass VPseudoTernaryW_VV_VX {
  defm "" : VPseudoTernaryW_VV;
  defm "" : VPseudoTernaryW_VX;
}
1779
// Bundles the widening ternary VV and FP-scalar (VF) variants.
multiclass VPseudoTernaryW_VV_VF {
  defm "" : VPseudoTernaryW_VV;
  defm "" : VPseudoTernaryW_VF;
}
1784
// Bundles the mask-result (M) binary variants VV, VX and VI.
multiclass VPseudoBinaryM_VV_VX_VI {
  defm "" : VPseudoBinaryM_VV;
  defm "" : VPseudoBinaryM_VX;
  defm "" : VPseudoBinaryM_VI;
}
1790
// Bundles the mask-result (M) binary variants VV and VX.
multiclass VPseudoBinaryM_VV_VX {
  defm "" : VPseudoBinaryM_VV;
  defm "" : VPseudoBinaryM_VX;
}
1795
// Bundles the mask-result (M) binary variants VV and VF (FP scalar).
multiclass VPseudoBinaryM_VV_VF {
  defm "" : VPseudoBinaryM_VV;
  defm "" : VPseudoBinaryM_VF;
}
1800
// Bundles the mask-result (M) binary variants VX and VI.
multiclass VPseudoBinaryM_VX_VI {
  defm "" : VPseudoBinaryM_VX;
  defm "" : VPseudoBinaryM_VI;
}
1805
// Reduction pseudos: for every source LMUL the result/accumulator operands
// use the single-register V_M1 class, and WritesElement0 is set on each
// pseudo (the scalar result presumably lands in element 0 -- confirm
// against the flag's definition).
multiclass VPseudoReductionV_VS {
  foreach m = MxList.m in {
    let WritesElement0 = 1 in
    defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>;
  }
}
1812
// Emits one unmasked and one masked unary (conversion) pseudo for the given
// LMUL; record names get "_<LMUL>" and "_<LMUL>_MASK" suffixes.
multiclass VPseudoConversion<VReg RetClass,
                             VReg Op1Class,
                             LMULInfo MInfo,
                             string Constraint = ""> {
  let VLMul = MInfo.value in {
    def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint>;
    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask<RetClass, Op1Class,
                                                    Constraint>;
  }
}
1823
// Same-width conversion pseudos, one per LMUL.
multiclass VPseudoConversionV_V {
  foreach m = MxList.m in
    defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>;
}
1828
// Widening conversions: double-width (wvrclass) earlyclobber destination;
// restricted to MxList.m[0-5].
multiclass VPseudoConversionW_V {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList.m[0-5] in
    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>;
}
1834
// Narrowing conversions: single-width earlyclobber destination from a
// double-width (wvrclass) source; restricted to MxList.m[0-5].
multiclass VPseudoConversionV_W {
  defvar constraint = "@earlyclobber $rd";
  foreach m = MxList.m[0-5] in
    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>;
}
1840
// Unit-stride segment loads.  For every EEW, every LMUL legal for that EEW,
// and every segment count NF, defines an unmasked and a masked pseudo over
// the matching segment (tuple) register class.  isFF inserts "FF" into the
// name for the fault-only-first variants.
multiclass VPseudoUSSegLoad<bit isFF> {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          defvar FFStr = !if(isFF, "FF", "");
          def nf # "E" # eew # FFStr # "_V_" # LInfo :
            VPseudoUSSegLoadNoMask<vreg, eew>;
          def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
            VPseudoUSSegLoadMask<vreg, eew>;
        }
      }
    }
  }
}
1858
// Strided segment loads: unmasked and masked pseudos per EEW/LMUL/NF, named
// <nf>E<eew>_V_<LMUL>[_MASK].
multiclass VPseudoSSegLoad {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew>;
        }
      }
    }
  }
}
1873
// Indexed segment loads.  The index operand has its own EEW/LMUL, iterated
// independently of the value LMUL; VLMul tracks the VALUE LMUL.  Names are
// <nf>EI<idx_eew>_V_<IdxLMUL>_<ValLMUL>[_MASK].
multiclass VPseudoISegLoad {
  foreach idx_eew = EEWList in {  // EEW for index argument.
    foreach idx_lmul = MxSet<idx_eew>.m in {  // LMUL for index argument.
      foreach val_lmul = MxList.m in {  // LMUL for the value.
        defvar IdxLInfo = idx_lmul.MX;
        defvar IdxVreg = idx_lmul.vrclass;
        defvar ValLInfo = val_lmul.MX;
        let VLMul = val_lmul.value in {
          foreach nf = NFSet<val_lmul>.L in {
            defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
            def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
              VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
            def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
              VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
          }
        }
      }
    }
  }
}
1894
// Unit-stride segment stores: unmasked and masked pseudos per EEW/LMUL/NF,
// mirroring VPseudoUSSegLoad (no fault-only-first variant for stores).
multiclass VPseudoUSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew>;
        }
      }
    }
  }
}
1909
// Strided segment stores: unmasked and masked pseudos per EEW/LMUL/NF,
// mirroring VPseudoSSegLoad.
multiclass VPseudoSSegStore {
  foreach eew = EEWList in {
    foreach lmul = MxSet<eew>.m in {
      defvar LInfo = lmul.MX;
      let VLMul = lmul.value in {
        foreach nf = NFSet<lmul>.L in {
          defvar vreg = SegRegClass<lmul, nf>.RC;
          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew>;
          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew>;
        }
      }
    }
  }
}
1924
// Indexed segment stores: index EEW/LMUL iterated independently of the value
// LMUL, mirroring VPseudoISegLoad; VLMul tracks the VALUE LMUL.
multiclass VPseudoISegStore {
  foreach idx_eew = EEWList in {  // EEW for index argument.
    foreach idx_lmul = MxSet<idx_eew>.m in {  // LMUL for index argument.
      foreach val_lmul = MxList.m in {  // LMUL for the value.
        defvar IdxLInfo = idx_lmul.MX;
        defvar IdxVreg = idx_lmul.vrclass;
        defvar ValLInfo = val_lmul.MX;
        let VLMul = val_lmul.value in {
          foreach nf = NFSet<val_lmul>.L in {
            defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
            def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
              VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
            def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
              VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
          }
        }
      }
    }
  }
}
1945
1946//===----------------------------------------------------------------------===//
1947// Helpers to define the intrinsic patterns.
1948//===----------------------------------------------------------------------===//
1949
// Selects the unmasked unary intrinsic onto <inst>_<kind>_<LMUL>, forwarding
// the source register, VL and SEW.
class VPatUnaryNoMask<string intrinsic_name,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op2_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op2_type op2_reg_class:$rs2),
                   (XLenVT (VLOp GPR:$vl)))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                   (op2_type op2_reg_class:$rs2),
                   GPR:$vl, sew)>;
1964
// Selects the "_mask" unary intrinsic onto <inst>_<kind>_<LMUL>_MASK; the
// merge operand and the mask (pinned to V0) are forwarded along with VL/SEW.
class VPatUnaryMask<string intrinsic_name,
                    string inst,
                    string kind,
                    ValueType result_type,
                    ValueType op2_type,
                    ValueType mask_type,
                    int sew,
                    LMULInfo vlmul,
                    VReg result_reg_class,
                    VReg op2_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0),
                   (XLenVT (VLOp GPR:$vl)))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op2_type op2_reg_class:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;
1984
// Mask-register unary op, unmasked form: selects onto <inst>_M_<BX>.
class VPatMaskUnaryNoMask<string intrinsic_name,
                          string inst,
                          MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
                (mti.Mask VR:$rs2),
                (XLenVT (VLOp GPR:$vl)))),
                (!cast<Instruction>(inst#"_M_"#mti.BX)
                (mti.Mask VR:$rs2),
                GPR:$vl, mti.SEW)>;
1994
// Mask-register unary op, masked form: selects the "_mask" intrinsic onto
// <inst>_M_<BX>_MASK with merge operand and V0 mask.
class VPatMaskUnaryMask<string intrinsic_name,
                        string inst,
                        MTypeInfo mti> :
  Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0),
                (XLenVT (VLOp GPR:$vl)))),
                (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
                (mti.Mask VR:$merge),
                (mti.Mask VR:$rs2),
                (mti.Mask V0), GPR:$vl, mti.SEW)>;
2007
// Unary op whose mask may live in ANY vector register (VR:$rs2), not just V0,
// plus a merge operand; selects onto <inst>_<kind>_<LMUL>.
class VPatUnaryAnyMask<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       VReg op1_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   (XLenVT (VLOp GPR:$vl)))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (mask_type VR:$rs2),
                   GPR:$vl, sew)>;
2028
// Selects the unmasked binary intrinsic onto the fully-named instruction
// `inst` (the caller bakes the LMUL suffix into the name).
class VPatBinaryNoMask<string intrinsic_name,
                       string inst,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       int sew,
                       VReg op1_reg_class,
                       DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (XLenVT (VLOp GPR:$vl)))),
                   (!cast<Instruction>(inst)
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   GPR:$vl, sew)>;
2045
// Selects the "_mask" binary intrinsic onto <inst>_MASK with merge operand
// and V0 mask (caller bakes the LMUL suffix into `inst`).
class VPatBinaryMask<string intrinsic_name,
                     string inst,
                     ValueType result_type,
                     ValueType op1_type,
                     ValueType op2_type,
                     ValueType mask_type,
                     int sew,
                     VReg result_reg_class,
                     VReg op1_reg_class,
                     DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0),
                   (XLenVT (VLOp GPR:$vl)))),
                   (!cast<Instruction>(inst#"_MASK")
                   (result_type result_reg_class:$merge),
                   (op1_type op1_reg_class:$rs1),
                   (op2_type op2_kind:$rs2),
                   (mask_type V0), GPR:$vl, sew)>;
2067
// Selects the unmasked ternary intrinsic (accumulator $rs3 first) onto
// <inst>_<kind>_<LMUL>.  NOTE(review): mask_type is unused here; it appears
// to be kept only for signature parity with VPatTernaryMask.
class VPatTernaryNoMask<string intrinsic,
                        string inst,
                        string kind,
                        ValueType result_type,
                        ValueType op1_type,
                        ValueType op2_type,
                        ValueType mask_type,
                        int sew,
                        LMULInfo vlmul,
                        VReg result_reg_class,
                        RegisterClass op1_reg_class,
                        DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic)
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (XLenVT (VLOp GPR:$vl)))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    GPR:$vl, sew)>;
2090
// Selects the "_mask" ternary intrinsic (accumulator $rs3 first, mask in V0)
// onto <inst>_<kind>_<LMUL>_MASK.
class VPatTernaryMask<string intrinsic,
                      string inst,
                      string kind,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg result_reg_class,
                      RegisterClass op1_reg_class,
                      DAGOperand op2_kind> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
                    (result_type result_reg_class:$rs3),
                    (op1_type op1_reg_class:$rs1),
                    (op2_type op2_kind:$rs2),
                    (mask_type V0),
                    (XLenVT (VLOp GPR:$vl)))),
                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
                    result_reg_class:$rs3,
                    (op1_type op1_reg_class:$rs1),
                    op2_kind:$rs2,
                    (mask_type V0),
                    GPR:$vl, sew)>;
2115
// Selects the unmasked AMO (atomic) intrinsic onto
// <inst>_WD_<data LMUL>_<address EMUL>; $vd is both old-value destination
// and tied write-back register.
// (Whitespace only: parameter continuation lines realigned to the opening
// '<', matching VPatAMOWDMask below.)
class VPatAMOWDNoMask<string intrinsic_name,
                      string inst,
                      ValueType result_type,
                      ValueType op1_type,
                      int sew,
                      LMULInfo vlmul,
                      LMULInfo emul,
                      VReg op1_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
                    GPR:$rs1,
                    (op1_type op1_reg_class:$vs2),
                    (result_type vlmul.vrclass:$vd),
                    (XLenVT (VLOp GPR:$vl)))),
                   (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX)
                    $rs1, $vs2, $vd,
                    GPR:$vl, sew)>;
2132
// Masked counterpart of VPatAMOWDNoMask: selects the "_mask" AMO intrinsic
// onto <inst>_WD_<data LMUL>_<address EMUL>_MASK with the mask in V0.
class VPatAMOWDMask<string intrinsic_name,
                    string inst,
                    ValueType result_type,
                    ValueType op1_type,
                    ValueType mask_type,
                    int sew,
                    LMULInfo vlmul,
                    LMULInfo emul,
                    VReg op1_reg_class> :
  Pat<(result_type (!cast<Intrinsic>(intrinsic_name # "_mask")
                    GPR:$rs1,
                    (op1_type op1_reg_class:$vs2),
                    (result_type vlmul.vrclass:$vd),
                    (mask_type V0),
                    (XLenVT (VLOp GPR:$vl)))),
                   (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX # "_MASK")
                    $rs1, $vs2, $vd,
                    (mask_type V0), GPR:$vl, sew)>;
2151
// Unit-stride load patterns: unmasked and masked ("_mask") forms.  The
// masked merge operand uses GetVRegNoV0 -- presumably because V0 holds the
// mask and cannot also be the merge register; confirm.
multiclass VPatUSLoad<string intrinsic,
                      string inst,
                      LLVMType type,
                      LLVMType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg reg_class>
{
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
    def : Pat<(type (Intr GPR:$rs1, (XLenVT (VLOp GPR:$vl)))),
                    (Pseudo $rs1, GPR:$vl, sew)>;
    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
    defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
    def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
                               GPR:$rs1, (mask_type V0), (XLenVT (VLOp GPR:$vl)))),
                    (PseudoMask $merge,
                                $rs1, (mask_type V0), GPR:$vl, sew)>;
}
2171
// Fault-only-first load patterns: matches the riscv_vleff / riscv_vleff_mask
// SDNodes (not an intrinsic name) onto the corresponding FF pseudos.
multiclass VPatUSLoadFF<string inst,
                        LLVMType type,
                        LLVMType mask_type,
                        int sew,
                        LMULInfo vlmul,
                        VReg reg_class>
{
    defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
    def : Pat<(type (riscv_vleff GPR:$rs1, (XLenVT (VLOp GPR:$vl)))),
                    (Pseudo $rs1, GPR:$vl, sew)>;
    defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
    def : Pat<(type (riscv_vleff_mask (type GetVRegNoV0<reg_class>.R:$merge),
                                      GPR:$rs1, (mask_type V0), (XLenVT (VLOp GPR:$vl)))),
                    (PseudoMask $merge,
                                $rs1, (mask_type V0), GPR:$vl, sew)>;
}
2188
// Strided load patterns: as VPatUSLoad but with an extra GPR stride ($rs2).
multiclass VPatSLoad<string intrinsic,
                     string inst,
                     LLVMType type,
                     LLVMType mask_type,
                     int sew,
                     LMULInfo vlmul,
                     VReg reg_class>
{
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
    def : Pat<(type (Intr GPR:$rs1, GPR:$rs2, (XLenVT (VLOp GPR:$vl)))),
                    (Pseudo $rs1, $rs2, GPR:$vl, sew)>;
    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
    defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
    def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
                               GPR:$rs1, GPR:$rs2, (mask_type V0), (XLenVT (VLOp GPR:$vl)))),
                    (PseudoMask $merge,
                                $rs1, $rs2, (mask_type V0), GPR:$vl, sew)>;
}
2208
// Indexed load patterns: the index vector ($rs2) has its own type and LMUL,
// which is baked into the pseudo name as <inst>_V_<idx LMUL>_<data LMUL>.
multiclass VPatILoad<string intrinsic,
                     string inst,
                     LLVMType type,
                     LLVMType idx_type,
                     LLVMType mask_type,
                     int sew,
                     LMULInfo vlmul,
                     LMULInfo idx_vlmul,
                     VReg reg_class,
                     VReg idx_reg_class>
{
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX);
    def : Pat<(type (Intr GPR:$rs1, (idx_type idx_reg_class:$rs2), (XLenVT (VLOp GPR:$vl)))),
                    (Pseudo $rs1, $rs2, GPR:$vl, sew)>;

    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
    defvar PseudoMask = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX#"_MASK");
    def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
                               GPR:$rs1, (idx_type idx_reg_class:$rs2),
                               (mask_type V0), (XLenVT (VLOp GPR:$vl)))),
                    (PseudoMask $merge,
                                $rs1, $rs2, (mask_type V0), GPR:$vl, sew)>;
}
2233
// Unit-stride store patterns: unmasked and masked forms.  Stores have no
// merge operand; $rs3 is the data being stored.
multiclass VPatUSStore<string intrinsic,
                       string inst,
                       LLVMType type,
                       LLVMType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg reg_class>
{
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
    def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, (XLenVT (VLOp GPR:$vl))),
                    (Pseudo $rs3, $rs1, GPR:$vl, sew)>;
    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
    defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
    def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, (mask_type V0), (XLenVT (VLOp GPR:$vl))),
              (PseudoMask $rs3, $rs1, (mask_type V0), GPR:$vl, sew)>;
}
2251
// Strided store patterns: as VPatUSStore but with a GPR stride ($rs2).
multiclass VPatSStore<string intrinsic,
                      string inst,
                      LLVMType type,
                      LLVMType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      VReg reg_class>
{
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
    def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, (XLenVT (VLOp GPR:$vl))),
                    (Pseudo $rs3, $rs1, $rs2, GPR:$vl, sew)>;
    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
    defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
    def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, (mask_type V0), (XLenVT (VLOp GPR:$vl))),
              (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), GPR:$vl, sew)>;
}
2269
// Indexed store patterns: index vector ($rs2) has its own type/LMUL, baked
// into the pseudo name as <inst>_V_<idx LMUL>_<data LMUL>.
multiclass VPatIStore<string intrinsic,
                      string inst,
                      LLVMType type,
                      LLVMType idx_type,
                      LLVMType mask_type,
                      int sew,
                      LMULInfo vlmul,
                      LMULInfo idx_vlmul,
                      VReg reg_class,
                      VReg idx_reg_class>
{
    defvar Intr = !cast<Intrinsic>(intrinsic);
    defvar Pseudo = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX);
    def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1,
                    (idx_type idx_reg_class:$rs2), (XLenVT (VLOp GPR:$vl))),
              (Pseudo $rs3, $rs1, $rs2, GPR:$vl, sew)>;
    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
    defvar PseudoMask = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX#"_MASK");
    def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1,
                        (idx_type idx_reg_class:$rs2), (mask_type V0), (XLenVT (VLOp GPR:$vl))),
              (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), GPR:$vl, sew)>;
}
2292
// Mask-to-scalar (XLenVT result) unary patterns over all mask types:
// unmasked and "_mask" forms onto <inst>_M_<BX>[_MASK].
// (Whitespace only: second parameter line realigned to the opening '<'.)
multiclass VPatUnaryS_M<string intrinsic_name,
                        string inst>
{
  foreach mti = AllMasks in {
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
                      (mti.Mask VR:$rs1), (XLenVT (VLOp GPR:$vl)))),
                      (!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
                      GPR:$vl, mti.SEW)>;
    def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
                      (mti.Mask VR:$rs1), (mti.Mask V0), (XLenVT (VLOp GPR:$vl)))),
                      (!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
                      (mti.Mask V0), GPR:$vl, mti.SEW)>;
  }
}
2307
// Instantiates VPatUnaryAnyMask (mask in any register, "VM" kind) for each
// vector type in the list.
multiclass VPatUnaryV_V_AnyMask<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    def : VPatUnaryAnyMask<intrinsic, instruction, "VM",
                           vti.Vector, vti.Vector, vti.Mask,
                           vti.SEW, vti.LMul, vti.RegClass,
                           vti.RegClass>;
  }
}
2317
// Mask-register unary patterns (unmasked + masked) over all mask types.
multiclass VPatUnaryM_M<string intrinsic,
                         string inst>
{
  foreach mti = AllMasks in {
    def : VPatMaskUnaryNoMask<intrinsic, inst, mti>;
    def : VPatMaskUnaryMask<intrinsic, inst, mti>;
  }
}
2326
// Unary ops taking a mask source and producing an integer vector ("M" kind),
// over all integer vector types.
multiclass VPatUnaryV_M<string intrinsic, string instruction>
{
  foreach vti = AllIntegerVectors in {
    def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                          vti.SEW, vti.LMul, VR>;
    def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
                        vti.Mask, vti.SEW, vti.LMul, vti.RegClass, VR>;
  }
}
2336
// Unary patterns over vector/fraction pairs (e.g. extensions): the source
// uses the fractional type's register class, the result the full type's.
// (Whitespace only: body indentation normalized to the 2-space/brace-on-same-
// line style used by the sibling multiclasses.)
multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix,
                         list<VTypeInfoToFraction> fractionList> {
  foreach vtiTofti = fractionList in {
    defvar vti = vtiTofti.Vti;
    defvar fti = vtiTofti.Fti;
    def : VPatUnaryNoMask<intrinsic, instruction, suffix,
                          vti.Vector, fti.Vector,
                          vti.SEW, vti.LMul, fti.RegClass>;
    def : VPatUnaryMask<intrinsic, instruction, suffix,
                        vti.Vector, fti.Vector, vti.Mask,
                        vti.SEW, vti.LMul, vti.RegClass, fti.RegClass>;
  }
}
2352
// Same-type unary patterns ("V" kind), unmasked + masked, per vector type.
multiclass VPatUnaryV_V<string intrinsic, string instruction,
                        list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    def : VPatUnaryNoMask<intrinsic, instruction, "V",
                          vti.Vector, vti.Vector,
                          vti.SEW, vti.LMul, vti.RegClass>;
    def : VPatUnaryMask<intrinsic, instruction, "V",
                        vti.Vector, vti.Vector, vti.Mask,
                        vti.SEW, vti.LMul, vti.RegClass, vti.RegClass>;
  }
}
2364
// Nullary (no data source) vector patterns: the intrinsic takes only VL;
// the masked form additionally takes a merge operand and the V0 mask.
multiclass VPatNullaryV<string intrinsic, string instruction>
{
  foreach vti = AllIntegerVectors in {
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
                          (XLenVT (VLOp GPR:$vl)))),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
                          GPR:$vl, vti.SEW)>;
    def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
                          (vti.Vector vti.RegClass:$merge),
                          (vti.Mask V0), (XLenVT (VLOp GPR:$vl)))),
                          (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
                          vti.RegClass:$merge, (vti.Mask V0),
                          GPR:$vl, vti.SEW)>;
  }
}
2380
// Nullary mask patterns: VL-only intrinsic onto <inst>_M_<BX>, per mask type.
multiclass VPatNullaryM<string intrinsic, string inst> {
  foreach mti = AllMasks in
    def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
                        (XLenVT (VLOp GPR:$vl)))),
                        (!cast<Instruction>(inst#"_M_"#mti.BX)
                        GPR:$vl, mti.SEW)>;
}
2388
// Bundles the unmasked (VPatBinaryNoMask) and masked (VPatBinaryMask)
// patterns for one binary op instance.
multiclass VPatBinary<string intrinsic,
                      string inst,
                      ValueType result_type,
                      ValueType op1_type,
                      ValueType op2_type,
                      ValueType mask_type,
                      int sew,
                      VReg result_reg_class,
                      VReg op1_reg_class,
                      DAGOperand op2_kind>
{
  def : VPatBinaryNoMask<intrinsic, inst, result_type, op1_type, op2_type,
                         sew, op1_reg_class, op2_kind>;
  def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type,
                       mask_type, sew, result_reg_class, op1_reg_class,
                       op2_kind>;
}
2406
// Binary op whose mask operand is a CARRY-IN (always V0); there is no
// separate "_mask" form for these.
multiclass VPatBinaryCarryIn<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             ValueType mask_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind>
{
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0),
                         (XLenVT (VLOp GPR:$vl)))),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (mask_type V0), GPR:$vl, sew)>;
}
2429
// Binary op that PRODUCES a mask (carry-out) and takes no mask input.
multiclass VPatBinaryMaskOut<string intrinsic,
                             string inst,
                             string kind,
                             ValueType result_type,
                             ValueType op1_type,
                             ValueType op2_type,
                             int sew,
                             LMULInfo vlmul,
                             VReg op1_reg_class,
                             DAGOperand op2_kind>
{
  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         (XLenVT (VLOp GPR:$vl)))),
                         (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
                         (op1_type op1_reg_class:$rs1),
                         (op2_type op2_kind:$rs2),
                         GPR:$vl, sew)>;
}
2450
// Emits both the unmasked and the masked unary pattern for a conversion
// instruction (one source operand; result and source may differ in type).
multiclass VPatConversion<string intrinsic,
                          string inst,
                          string kind,
                          ValueType result_type,
                          ValueType op1_type,
                          ValueType mask_type,
                          int sew,
                          LMULInfo vlmul,
                          VReg result_reg_class,
                          VReg op1_reg_class>
{
  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
                        sew, vlmul, op1_reg_class>;
  def : VPatUnaryMask<intrinsic, inst, kind, result_type, op1_type,
                      mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
}
2467
// Vector-vector binary patterns: both sources and the result use the same
// VTypeInfo.  Instantiated once per type in vtilist with the "_VV_<LMUL>"
// instruction suffix.
multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                      vti.SEW, vti.RegClass,
                      vti.RegClass, vti.RegClass>;
}
2476
// Vector-vector binary patterns whose second source uses the integer type of
// the same size/LMUL (via GetIntVTypeInfo), e.g. integer index vectors for
// float element types.
// NOTE(review): op2's register class is vti.RegClass even though its type is
// ivti.Vector — presumably the classes coincide for equal LMUL; confirm.
multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar ivti = GetIntVTypeInfo<vti>.Vti;
    defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                      vti.SEW, vti.RegClass,
                      vti.RegClass, vti.RegClass>;
  }
}

// Like VPatBinaryV_VV_INT but the index vector has a fixed element width
// (eew), so its EMUL is computed from the data type's LMUL and SEW.
// Patterns are only emitted when the computed EMUL is legal.
multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
                                  int eew, list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    // emul = lmul * eew / sew
    defvar vlmul = vti.LMul;
    defvar octuple_lmul = octuple_from_str<vlmul.MX>.ret;
    defvar octuple_emul = !srl(!mul(octuple_lmul, eew), shift_amount<vti.SEW>.val);
    // Octuple EMUL must lie in [1, 64], i.e. MF8 through M8.
    if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
      defvar emul_str = octuple_to_str<octuple_emul>.ret;
      defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
      defvar inst = instruction # "_VV_" # vti.LMul.MX # "_" # emul_str;
      defm : VPatBinary<intrinsic, inst,
                        vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
                        vti.SEW, vti.RegClass,
                        vti.RegClass, ivti.RegClass>;
    }
  }
}
2506
// Vector-scalar binary patterns.  The kind suffix ("VX"/"VF") comes from the
// type's ScalarSuffix, so one multiclass covers integer and float scalars.
multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
                      vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                      vti.SEW, vti.RegClass,
                      vti.RegClass, vti.ScalarRegClass>;
  }
}

// Vector-scalar binary patterns with a fixed XLenVT/GPR scalar operand
// regardless of the vector element type.
multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VX_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.SEW, vti.RegClass,
                      vti.RegClass, GPR>;
}

// Vector-immediate binary patterns; imm_type selects the immediate operand
// class (e.g. simm5 or uimm5).
multiclass VPatBinaryV_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist, Operand imm_type> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
                      vti.SEW, vti.RegClass,
                      vti.RegClass, imm_type>;
}
2535
// Mask-mask binary patterns (mask-register logical operations); all operands
// and the result live in VR.
multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
  foreach mti = AllMasks in
    def : VPatBinaryNoMask<intrinsic, instruction # "_MM_" # mti.LMul.MX,
                           mti.Mask, mti.Mask, mti.Mask,
                           mti.SEW, VR, VR>;
}
2542
// Widening vector-vector patterns: narrow (Vti) sources, wide (Wti) result.
multiclass VPatBinaryW_VV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinary<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
                      Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
                      Vti.SEW, Wti.RegClass,
                      Vti.RegClass, Vti.RegClass>;
  }
}

// Widening vector-scalar patterns: narrow vector + narrow scalar, wide result.
multiclass VPatBinaryW_VX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "V"#Vti.ScalarSuffix;
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                      Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
                      Vti.SEW, Wti.RegClass,
                      Vti.RegClass, Vti.ScalarRegClass>;
  }
}

// Widening patterns where the first source is already wide (wv form):
// wide op1, narrow op2, wide result.
multiclass VPatBinaryW_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                      Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                      Vti.SEW, Wti.RegClass,
                      Wti.RegClass, Vti.RegClass>;
  }
}

// wx form: wide op1, narrow scalar op2, wide result.
multiclass VPatBinaryW_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "W"#Vti.ScalarSuffix;
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                      Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                      Wti.SEW, Wti.RegClass,
                      Wti.RegClass, Vti.ScalarRegClass>;
  }
}
2592
// Narrowing patterns (wv form): wide op1, narrow op2, narrow result.
multiclass VPatBinaryV_WV<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                      Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                      Vti.SEW, Vti.RegClass,
                      Wti.RegClass, Vti.RegClass>;
  }
}

// Narrowing patterns (wx form): wide op1, narrow scalar op2, narrow result.
multiclass VPatBinaryV_WX<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defvar kind = "W"#Vti.ScalarSuffix;
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
                      Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
                      Vti.SEW, Vti.RegClass,
                      Wti.RegClass, Vti.ScalarRegClass>;
  }
}

// Narrowing patterns (wi form): wide op1, uimm5 immediate, narrow result.
multiclass VPatBinaryV_WI<string intrinsic, string instruction,
                          list<VTypeInfoToWide> vtilist> {
  foreach VtiToWti = vtilist in {
    defvar Vti = VtiToWti.Vti;
    defvar Wti = VtiToWti.Wti;
    defm : VPatBinary<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
                      Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
                      Vti.SEW, Vti.RegClass,
                      Wti.RegClass, uimm5>;
  }
}
2629
// Carry-in patterns, vector-vector form.  When CarryOut is set the result is
// the mask type instead of the vector type (carry/borrow-out producing ops).
multiclass VPatBinaryV_VM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Vector, vti.Mask,
                             vti.SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

// Carry-in patterns, vector-scalar form ("VXM"/"VFM" via ScalarSuffix).
multiclass VPatBinaryV_XM<string intrinsic, string instruction,
                          bit CarryOut = 0,
                          list<VTypeInfo> vtilist = AllIntegerVectors> {
  foreach vti = vtilist in
    defm : VPatBinaryCarryIn<intrinsic, instruction,
                             "V"#vti.ScalarSuffix#"M",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, vti.Scalar, vti.Mask,
                             vti.SEW, vti.LMul,
                             vti.RegClass, vti.ScalarRegClass>;
}

// Carry-in patterns, vector-immediate (simm5) form; integer vectors only.
multiclass VPatBinaryV_IM<string intrinsic, string instruction,
                          bit CarryOut = 0> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
                             !if(CarryOut, vti.Mask, vti.Vector),
                             vti.Vector, XLenVT, vti.Mask,
                             vti.SEW, vti.LMul,
                             vti.RegClass, simm5>;
}
2662
// Mask-result patterns without a carry-in, vector-vector form.
multiclass VPatBinaryV_V<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
                             vti.Mask, vti.Vector, vti.Vector,
                             vti.SEW, vti.LMul,
                             vti.RegClass, vti.RegClass>;
}

// Mask-result patterns without a carry-in, vector-scalar (GPR) form.
multiclass VPatBinaryV_X<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.SEW, vti.LMul,
                             vti.RegClass, GPR>;
}

// Mask-result patterns without a carry-in, vector-immediate (simm5) form.
multiclass VPatBinaryV_I<string intrinsic, string instruction> {
  foreach vti = AllIntegerVectors in
    defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
                             vti.Mask, vti.Vector, XLenVT,
                             vti.SEW, vti.LMul,
                             vti.RegClass, simm5>;
}
2686
// Comparison-style patterns: vector sources, mask result in VR.
multiclass VPatBinaryM_VV<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
                      vti.Mask, vti.Vector, vti.Vector, vti.Mask,
                      vti.SEW, VR,
                      vti.RegClass, vti.RegClass>;
}

// Comparison-style patterns, vector-scalar form (suffix from ScalarSuffix).
multiclass VPatBinaryM_VX<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    defvar kind = "V"#vti.ScalarSuffix;
    defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
                      vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
                      vti.SEW, VR,
                      vti.RegClass, vti.ScalarRegClass>;
  }
}

// Comparison-style patterns, vector-immediate (simm5) form.
multiclass VPatBinaryM_VI<string intrinsic, string instruction,
                          list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
                      vti.Mask, vti.Vector, XLenVT, vti.Mask,
                      vti.SEW, VR,
                      vti.RegClass, simm5>;
}
2715
// Convenience wrapper: emit vv, vx and vi patterns together.
multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist, Operand ImmType = simm5>
{
  defm "" : VPatBinaryV_VV<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryV_VX<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
}

// Convenience wrapper: emit vv and vx patterns together.
multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
{
  defm "" : VPatBinaryV_VV<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryV_VX<intrinsic, instruction, vtilist>;
}
2730
// Convenience wrapper: emit vx and vi patterns together.  ImmType defaults to
// simm5 (matching VPatBinaryV_VV_VX_VI) so existing callers are unchanged.
multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist, Operand ImmType = simm5>
{
  defm "" : VPatBinaryV_VX<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
}
2737
// Convenience wrapper: widening vv and vx patterns together.
multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
{
  defm "" : VPatBinaryW_VV<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryW_VX<intrinsic, instruction, vtilist>;
}

// Convenience wrapper: widening wv and wx patterns together.
multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction,
                             list<VTypeInfoToWide> vtilist>
{
  defm "" : VPatBinaryW_WV<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryW_WX<intrinsic, instruction, vtilist>;
}

// Convenience wrapper: narrowing wv, wx and wi patterns together.
multiclass VPatBinaryV_WV_WX_WI<string intrinsic, string instruction,
                                list<VTypeInfoToWide> vtilist>
{
  defm "" : VPatBinaryV_WV<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryV_WX<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryV_WI<intrinsic, instruction, vtilist>;
}
2759
// Carry-in, vector-result wrappers (vvm/vxm/vim).
multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
{
  defm "" : VPatBinaryV_VM<intrinsic, instruction>;
  defm "" : VPatBinaryV_XM<intrinsic, instruction>;
  defm "" : VPatBinaryV_IM<intrinsic, instruction>;
}

// Carry-in, mask-result wrappers (carry/borrow out).
multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction>
{
  defm "" : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>;
  defm "" : VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>;
  defm "" : VPatBinaryV_IM<intrinsic, instruction, /*CarryOut=*/1>;
}

// No carry-in, mask-result wrappers (vv/vx/vi).
multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction>
{
  defm "" : VPatBinaryV_V<intrinsic, instruction>;
  defm "" : VPatBinaryV_X<intrinsic, instruction>;
  defm "" : VPatBinaryV_I<intrinsic, instruction>;
}

// As above, but without the immediate form.
multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction>
{
  defm "" : VPatBinaryV_VM<intrinsic, instruction>;
  defm "" : VPatBinaryV_XM<intrinsic, instruction>;
}

multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction>
{
  defm "" : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>;
  defm "" : VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>;
}

multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
{
  defm "" : VPatBinaryV_V<intrinsic, instruction>;
  defm "" : VPatBinaryV_X<intrinsic, instruction>;
}
2798
// Emits both the unmasked and the masked pattern for a ternary (three-source)
// operation such as multiply-add.
multiclass VPatTernary<string intrinsic,
                       string inst,
                       string kind,
                       ValueType result_type,
                       ValueType op1_type,
                       ValueType op2_type,
                       ValueType mask_type,
                       int sew,
                       LMULInfo vlmul,
                       VReg result_reg_class,
                       RegisterClass op1_reg_class,
                       DAGOperand op2_kind> {
  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                    mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                    op2_kind>;
  def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
                        mask_type, sew, vlmul, result_reg_class, op1_reg_class,
                        op2_kind>;
}
2818
// Ternary vv patterns: all three operands and the result share one type.
multiclass VPatTernaryV_VV<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernary<intrinsic, instruction, "VV",
                       vti.Vector, vti.Vector, vti.Vector, vti.Mask,
                       vti.SEW, vti.LMul, vti.RegClass,
                       vti.RegClass, vti.RegClass>;
}

// Ternary vx patterns with a GPR scalar as op1.
multiclass VPatTernaryV_VX<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernary<intrinsic, instruction, "VX",
                       vti.Vector, vti.Vector, XLenVT, vti.Mask,
                       vti.SEW, vti.LMul, vti.RegClass,
                       vti.RegClass, GPR>;
}
2836
// Ternary vector-scalar patterns where the scalar is op1 and the kind suffix
// ("VX"/"VF") comes from the type's ScalarSuffix (multiply-add style
// operand order, per the AAXA name).
multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
  foreach vti = vtilist in
    defm : VPatTernary<intrinsic, instruction,
                       "V"#vti.ScalarSuffix,
                       vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
                       vti.SEW, vti.LMul, vti.RegClass,
                       vti.ScalarRegClass, vti.RegClass>;
}
2846
// Ternary vi patterns; Imm_type selects the immediate operand class.
multiclass VPatTernaryV_VI<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist, Operand Imm_type> {
  foreach vti = vtilist in
    defm : VPatTernary<intrinsic, instruction, "VI",
                       vti.Vector, vti.Vector, XLenVT, vti.Mask,
                       vti.SEW, vti.LMul, vti.RegClass,
                       vti.RegClass, Imm_type>;
}
2855
// Widening ternary vv patterns: narrow sources, wide accumulator/result.
multiclass VPatTernaryW_VV<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defm : VPatTernary<intrinsic, instruction, "VV",
                      wti.Vector, vti.Vector, vti.Vector,
                      vti.Mask, vti.SEW, vti.LMul,
                      wti.RegClass, vti.RegClass, vti.RegClass>;
  }
}

// Widening ternary vector-scalar patterns (scalar is op1).
multiclass VPatTernaryW_VX<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
  foreach vtiToWti = vtilist in {
    defvar vti = vtiToWti.Vti;
    defvar wti = vtiToWti.Wti;
    defm : VPatTernary<intrinsic, instruction,
                       "V"#vti.ScalarSuffix,
                       wti.Vector, vti.Scalar, vti.Vector,
                       vti.Mask, vti.SEW, vti.LMul,
                       wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
  }
}
2880
// Convenience wrapper: ternary vv + scalar (AAXA order) patterns.
multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist> {
  defm "" : VPatTernaryV_VV<intrinsic, instruction, vtilist>;
  defm "" : VPatTernaryV_VX_AAXA<intrinsic, instruction, vtilist>;
}

// Convenience wrapper: ternary vx + vi patterns.
multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
                              list<VTypeInfo> vtilist, Operand Imm_type = simm5> {
  defm "" : VPatTernaryV_VX<intrinsic, instruction, vtilist>;
  defm "" : VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;
}

// Convenience wrapper: mask-result vv + vx + vi patterns.
multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
                                list<VTypeInfo> vtilist>
{
  defm "" : VPatBinaryM_VV<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryM_VX<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryM_VI<intrinsic, instruction, vtilist>;
}

// Convenience wrapper: widening ternary vv + vx patterns.
multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction,
                              list<VTypeInfoToWide> vtilist> {
  defm "" : VPatTernaryW_VV<intrinsic, instruction, vtilist>;
  defm "" : VPatTernaryW_VX<intrinsic, instruction, vtilist>;
}

// Convenience wrapper: mask-result vv + vx patterns.
multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
{
  defm "" : VPatBinaryM_VV<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryM_VX<intrinsic, instruction, vtilist>;
}

// Convenience wrapper: mask-result vx + vi patterns.
multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
                             list<VTypeInfo> vtilist>
{
  defm "" : VPatBinaryM_VX<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryM_VI<intrinsic, instruction, vtilist>;
}

// Convenience wrapper: integer-index vv + fixed-GPR vx + vi patterns.
multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
                                    list<VTypeInfo> vtilist, Operand ImmType = simm5>
{
  defm "" : VPatBinaryV_VV_INT<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryV_VX_INT<intrinsic, instruction, vtilist>;
  defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
}
2928
// Single-width reduction patterns.  The scalar source/destination operand is
// an M1 vector in VR; for group (LMUL > 1) types the M1 type comes from the
// group type's VectorM1 field.
multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in
  {
    // Look up the M1 type with the same element width, e.g. "VI32M1".
    defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
    defm : VPatTernary<intrinsic, instruction, "VS",
                       vectorM1.Vector, vti.Vector,
                       vectorM1.Vector, vti.Mask,
                       vti.SEW, vti.LMul,
                       VR, vti.RegClass, VR>;
  }
  foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in
  {
    defm : VPatTernary<intrinsic, instruction, "VS",
                       gvti.VectorM1, gvti.Vector,
                       gvti.VectorM1, gvti.Mask,
                       gvti.SEW, gvti.LMul,
                       VR, gvti.RegClass, VR>;
  }
}
2948
// Widening reduction patterns: the scalar operand is an M1 vector with twice
// the source element width.  Skipped when the doubled width exceeds 64 bits.
multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat = 0> {
  foreach vti = !if(IsFloat, AllFloatVectors, AllIntegerVectors) in
  {
    defvar wtiSEW = !mul(vti.SEW, 2);
    if !le(wtiSEW, 64) then {
      defvar wtiM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # wtiSEW # "M1");
      defm : VPatTernary<intrinsic, instruction, "VS",
                         wtiM1.Vector, vti.Vector,
                         wtiM1.Vector, vti.Mask,
                         vti.SEW, vti.LMul,
                         wtiM1.RegClass, vti.RegClass,
                         wtiM1.RegClass>;
    }
  }
}
2964
// Single-width conversion: float source -> integer result of the same LMUL.
multiclass VPatConversionVI_VF<string intrinsic,
                               string instruction>
{
  foreach fvti = AllFloatVectors in
  {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;

    defm : VPatConversion<intrinsic, instruction, "V",
                          ivti.Vector, fvti.Vector, ivti.Mask, fvti.SEW,
                          fvti.LMul, ivti.RegClass, fvti.RegClass>;
  }
}

// Single-width conversion: integer source -> float result of the same LMUL.
multiclass VPatConversionVF_VI<string intrinsic,
                               string instruction>
{
  foreach fvti = AllFloatVectors in
  {
    defvar ivti = GetIntVTypeInfo<fvti>.Vti;

    defm : VPatConversion<intrinsic, instruction, "V",
                          fvti.Vector, ivti.Vector, fvti.Mask, ivti.SEW,
                          ivti.LMul, fvti.RegClass, ivti.RegClass>;
  }
}

// Widening conversion: float source -> wide integer result.
multiclass VPatConversionWI_VF<string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;

    defm : VPatConversion<intrinsic, instruction, "V",
                          iwti.Vector, fvti.Vector, iwti.Mask, fvti.SEW,
                          fvti.LMul, iwti.RegClass, fvti.RegClass>;
  }
}

// Widening conversion: integer source -> wide float result.
multiclass VPatConversionWF_VI<string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in
  {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;

    defm : VPatConversion<intrinsic, instruction, "V",
                          fwti.Vector, vti.Vector, fwti.Mask, vti.SEW,
                          vti.LMul, fwti.RegClass, vti.RegClass>;
  }
}

// Widening conversion: float source -> wide float result.
multiclass VPatConversionWF_VF <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;

    defm : VPatConversion<intrinsic, instruction, "V",
                          fwti.Vector, fvti.Vector, fwti.Mask, fvti.SEW,
                          fvti.LMul, fwti.RegClass, fvti.RegClass>;
  }
}

// Narrowing conversion ("W" kind): wide float source -> integer result.
multiclass VPatConversionVI_WF <string intrinsic, string instruction> {
  foreach vtiToWti = AllWidenableIntToFloatVectors in
  {
    defvar vti = vtiToWti.Vti;
    defvar fwti = vtiToWti.Wti;

    defm : VPatConversion<intrinsic, instruction, "W",
                          vti.Vector, fwti.Vector, vti.Mask, vti.SEW,
                          vti.LMul, vti.RegClass, fwti.RegClass>;
  }
}

// Narrowing conversion ("W" kind): wide integer source -> float result.
multiclass VPatConversionVF_WI <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;

    defm : VPatConversion<intrinsic, instruction, "W",
                          fvti.Vector, iwti.Vector, fvti.Mask, fvti.SEW,
                          fvti.LMul, fvti.RegClass, iwti.RegClass>;
  }
}

// Narrowing conversion ("W" kind): wide float source -> narrow float result.
multiclass VPatConversionVF_WF <string intrinsic, string instruction> {
  foreach fvtiToFWti = AllWidenableFloatVectors in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;

    defm : VPatConversion<intrinsic, instruction, "W",
                          fvti.Vector, fwti.Vector, fvti.Mask, fvti.SEW,
                          fvti.LMul, fvti.RegClass, fwti.RegClass>;
  }
}
3062
// Emits the unmasked and masked patterns for one vector AMO instruction with
// a given data type / offset (index) type pairing.
multiclass VPatAMOWD<string intrinsic,
                     string inst,
                     ValueType result_type,
                     ValueType offset_type,
                     ValueType mask_type,
                     int sew,
                     LMULInfo vlmul,
                     LMULInfo emul,
                     VReg op1_reg_class>
{
  def : VPatAMOWDNoMask<intrinsic, inst, result_type, offset_type,
                        sew, vlmul, emul, op1_reg_class>;
  def : VPatAMOWDMask<intrinsic, inst, result_type, offset_type,
                      mask_type, sew, vlmul, emul, op1_reg_class>;
}

// Instantiates VPatAMOWD for each (eew, vti) combination with SEW 32 or 64
// whose computed index EMUL is legal (octuple value in [1, 64]).
multiclass VPatAMOV_WD<string intrinsic,
                       string inst,
                       list<VTypeInfo> vtilist> {
  foreach eew = EEWList in {
    foreach vti = vtilist in {
      // Vector AMOs only operate on 32- and 64-bit data elements.
      if !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)) then {
        defvar octuple_lmul = octuple_from_str<vti.LMul.MX>.ret;
        // Calculate emul = eew * lmul / sew
        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), shift_amount<vti.SEW>.val);
        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
          defvar emulMX = octuple_to_str<octuple_emul>.ret;
          defvar offsetVti = !cast<VTypeInfo>("VI" # eew # emulMX);
          defvar inst_ei = inst # "EI" # eew;
          defm : VPatAMOWD<intrinsic, inst_ei,
                           vti.Vector, offsetVti.Vector,
                           vti.Mask, vti.SEW, vti.LMul, offsetVti.LMul, offsetVti.RegClass>;
        }
      }
    }
  }
}
3100
3101//===----------------------------------------------------------------------===//
3102// Pseudo instructions
3103//===----------------------------------------------------------------------===//
3104
3105let Predicates = [HasStdExtV] in {
3106
3107//===----------------------------------------------------------------------===//
3108// Pseudo Instructions for CodeGen
3109//===----------------------------------------------------------------------===//
// Whole-register move pseudos, one per register-group size.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
  def PseudoVMV1R_V : VPseudo<VMV1R_V, V_M1, (outs VR:$vd), (ins VR:$vs2)>;
  def PseudoVMV2R_V : VPseudo<VMV2R_V, V_M2, (outs VRM2:$vd), (ins VRM2:$vs2)>;
  def PseudoVMV4R_V : VPseudo<VMV4R_V, V_M4, (outs VRM4:$vd), (ins VRM4:$vs2)>;
  def PseudoVMV8R_V : VPseudo<VMV8R_V, V_M8, (outs VRM8:$vd), (ins VRM8:$vs2)>;
}

// Reads the VLENB CSR value into a GPR (selected from riscv_read_vlenb).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
  def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins),
                               [(set GPR:$rd, (riscv_read_vlenb))]>;
}

// Reads the current VL into a GPR (selected from riscv_read_vl).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
    Uses = [VL] in
def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins),
                          [(set GPR:$rd, (riscv_read_vl))]>;
3126
3127//===----------------------------------------------------------------------===//
3128// 6. Configuration-Setting Instructions
3129//===----------------------------------------------------------------------===//
3130
3131// Pseudos.
// vsetvli/vsetivli pseudos; both write the VL and VTYPE CSRs (Defs below).
let hasSideEffects = 1, mayLoad = 0, mayStore = 0, Defs = [VL, VTYPE] in {
def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei), []>;
def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp:$vtypei), []>;
}
3136
3137//===----------------------------------------------------------------------===//
3138// 7. Vector Loads and Stores
3139//===----------------------------------------------------------------------===//
3140
3141//===----------------------------------------------------------------------===//
3142// 7.4 Vector Unit-Stride Instructions
3143//===----------------------------------------------------------------------===//
3144
3145// Pseudos Unit-Stride Loads and Stores
// Unit-stride load/store pseudos, one family per element width in EEWList.
foreach eew = EEWList in {
  defm PseudoVLE # eew : VPseudoUSLoad;
  defm PseudoVSE # eew : VPseudoUSStore;
}

// Mask (vle1.v/vse1.v) load/store pseudos.
defm PseudoVLE1 : VPseudoLoadMask;
defm PseudoVSE1 : VPseudoStoreMask;
3153
3154//===----------------------------------------------------------------------===//
3155// 7.5 Vector Strided Instructions
3156//===----------------------------------------------------------------------===//
3157
3158// Vector Strided Loads and Stores
// Strided load/store pseudos, one family per element width.
foreach eew = EEWList in {
  defm PseudoVLSE # eew : VPseudoSLoad;
  defm PseudoVSSE # eew : VPseudoSStore;
}
3163
3164//===----------------------------------------------------------------------===//
3165// 7.6 Vector Indexed Instructions
3166//===----------------------------------------------------------------------===//
3167
3168// Vector Indexed Loads and Stores
// Indexed (unordered/ordered) load/store pseudos, one family per index width.
foreach eew = EEWList in {
  defm PseudoVLUXEI # eew : VPseudoILoad;
  defm PseudoVLOXEI # eew : VPseudoILoad;
  defm PseudoVSOXEI # eew : VPseudoIStore;
  defm PseudoVSUXEI # eew : VPseudoIStore;
}
3175
3176//===----------------------------------------------------------------------===//
3177// 7.7. Unit-stride Fault-Only-First Loads
3178//===----------------------------------------------------------------------===//
3179
3180// vleff may update VL register
// vleff may update VL register
let hasSideEffects = 1, Defs = [VL] in
foreach eew = EEWList in {
  defm PseudoVLE # eew # FF : VPseudoUSLoad;
}
3185
3186//===----------------------------------------------------------------------===//
3187// 7.8. Vector Load/Store Segment Instructions
3188//===----------------------------------------------------------------------===//
// Segment load/store pseudos. The multiclasses expand over NF and LMUL;
// unit-stride (US), strided (S), and indexed ordered/unordered (OX/UX) forms.
defm PseudoVLSEG : VPseudoUSSegLoad</*fault-only-first*/false>;
defm PseudoVLSSEG : VPseudoSSegLoad;
defm PseudoVLOXSEG : VPseudoISegLoad;
defm PseudoVLUXSEG : VPseudoISegLoad;
defm PseudoVSSEG : VPseudoUSSegStore;
defm PseudoVSSSEG : VPseudoSSegStore;
defm PseudoVSOXSEG : VPseudoISegStore;
defm PseudoVSUXSEG : VPseudoISegStore;

// vlseg<nf>e<eew>ff.v may update the VL register; same modeling as vleff
// above (implicit def of VL plus hasSideEffects).
let hasSideEffects = 1, Defs = [VL] in
defm PseudoVLSEG : VPseudoUSSegLoad</*fault-only-first*/true>;
3201
3202//===----------------------------------------------------------------------===//
3203// 8. Vector AMO Operations
3204//===----------------------------------------------------------------------===//
// Vector AMO pseudos (Zvamo). Selection patterns for these are emitted
// separately below under the HasStdExtZvamo predicate.
defm PseudoVAMOSWAP : VPseudoAMO;
defm PseudoVAMOADD : VPseudoAMO;
defm PseudoVAMOXOR : VPseudoAMO;
defm PseudoVAMOAND : VPseudoAMO;
defm PseudoVAMOOR : VPseudoAMO;
defm PseudoVAMOMIN : VPseudoAMO;
defm PseudoVAMOMAX : VPseudoAMO;
defm PseudoVAMOMINU : VPseudoAMO;
defm PseudoVAMOMAXU : VPseudoAMO;
3214
3215//===----------------------------------------------------------------------===//
3216// 12. Vector Integer Arithmetic Instructions
3217//===----------------------------------------------------------------------===//
3218
3219//===----------------------------------------------------------------------===//
3220// 12.1. Vector Single-Width Integer Add and Subtract
3221//===----------------------------------------------------------------------===//
3222defm PseudoVADD        : VPseudoBinaryV_VV_VX_VI;
3223defm PseudoVSUB        : VPseudoBinaryV_VV_VX;
3224defm PseudoVRSUB       : VPseudoBinaryV_VX_VI;
3225
3226//===----------------------------------------------------------------------===//
3227// 12.2. Vector Widening Integer Add/Subtract
3228//===----------------------------------------------------------------------===//
3229defm PseudoVWADDU    : VPseudoBinaryW_VV_VX;
3230defm PseudoVWSUBU    : VPseudoBinaryW_VV_VX;
3231defm PseudoVWADD     : VPseudoBinaryW_VV_VX;
3232defm PseudoVWSUB     : VPseudoBinaryW_VV_VX;
3233defm PseudoVWADDU    : VPseudoBinaryW_WV_WX;
3234defm PseudoVWSUBU    : VPseudoBinaryW_WV_WX;
3235defm PseudoVWADD     : VPseudoBinaryW_WV_WX;
3236defm PseudoVWSUB     : VPseudoBinaryW_WV_WX;
3237
3238//===----------------------------------------------------------------------===//
3239// 12.3. Vector Integer Extension
3240//===----------------------------------------------------------------------===//
3241defm PseudoVZEXT_VF2 : PseudoUnaryV_VF2;
3242defm PseudoVZEXT_VF4 : PseudoUnaryV_VF4;
3243defm PseudoVZEXT_VF8 : PseudoUnaryV_VF8;
3244defm PseudoVSEXT_VF2 : PseudoUnaryV_VF2;
3245defm PseudoVSEXT_VF4 : PseudoUnaryV_VF4;
3246defm PseudoVSEXT_VF8 : PseudoUnaryV_VF8;
3247
3248//===----------------------------------------------------------------------===//
3249// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
3250//===----------------------------------------------------------------------===//
3251defm PseudoVADC        : VPseudoBinaryV_VM_XM_IM;
3252defm PseudoVMADC       : VPseudoBinaryM_VM_XM_IM<"@earlyclobber $rd">;
3253defm PseudoVMADC       : VPseudoBinaryM_V_X_I<"@earlyclobber $rd">;
3254
3255defm PseudoVSBC        : VPseudoBinaryV_VM_XM;
3256defm PseudoVMSBC       : VPseudoBinaryM_VM_XM<"@earlyclobber $rd">;
3257defm PseudoVMSBC       : VPseudoBinaryM_V_X<"@earlyclobber $rd">;
3258
3259//===----------------------------------------------------------------------===//
3260// 12.5. Vector Bitwise Logical Instructions
3261//===----------------------------------------------------------------------===//
3262defm PseudoVAND        : VPseudoBinaryV_VV_VX_VI;
3263defm PseudoVOR         : VPseudoBinaryV_VV_VX_VI;
3264defm PseudoVXOR        : VPseudoBinaryV_VV_VX_VI;
3265
3266//===----------------------------------------------------------------------===//
3267// 12.6. Vector Single-Width Bit Shift Instructions
3268//===----------------------------------------------------------------------===//
3269defm PseudoVSLL        : VPseudoBinaryV_VV_VX_VI<uimm5>;
3270defm PseudoVSRL        : VPseudoBinaryV_VV_VX_VI<uimm5>;
3271defm PseudoVSRA        : VPseudoBinaryV_VV_VX_VI<uimm5>;
3272
3273//===----------------------------------------------------------------------===//
3274// 12.7. Vector Narrowing Integer Right Shift Instructions
3275//===----------------------------------------------------------------------===//
3276defm PseudoVNSRL       : VPseudoBinaryV_WV_WX_WI;
3277defm PseudoVNSRA       : VPseudoBinaryV_WV_WX_WI;
3278
3279//===----------------------------------------------------------------------===//
3280// 12.8. Vector Integer Comparison Instructions
3281//===----------------------------------------------------------------------===//
3282defm PseudoVMSEQ       : VPseudoBinaryM_VV_VX_VI;
3283defm PseudoVMSNE       : VPseudoBinaryM_VV_VX_VI;
3284defm PseudoVMSLTU      : VPseudoBinaryM_VV_VX;
3285defm PseudoVMSLT       : VPseudoBinaryM_VV_VX;
3286defm PseudoVMSLEU      : VPseudoBinaryM_VV_VX_VI;
3287defm PseudoVMSLE       : VPseudoBinaryM_VV_VX_VI;
3288defm PseudoVMSGTU      : VPseudoBinaryM_VX_VI;
3289defm PseudoVMSGT       : VPseudoBinaryM_VX_VI;
3290
3291//===----------------------------------------------------------------------===//
3292// 12.9. Vector Integer Min/Max Instructions
3293//===----------------------------------------------------------------------===//
3294defm PseudoVMINU       : VPseudoBinaryV_VV_VX;
3295defm PseudoVMIN        : VPseudoBinaryV_VV_VX;
3296defm PseudoVMAXU       : VPseudoBinaryV_VV_VX;
3297defm PseudoVMAX        : VPseudoBinaryV_VV_VX;
3298
3299//===----------------------------------------------------------------------===//
3300// 12.10. Vector Single-Width Integer Multiply Instructions
3301//===----------------------------------------------------------------------===//
3302defm PseudoVMUL        : VPseudoBinaryV_VV_VX;
3303defm PseudoVMULH       : VPseudoBinaryV_VV_VX;
3304defm PseudoVMULHU      : VPseudoBinaryV_VV_VX;
3305defm PseudoVMULHSU     : VPseudoBinaryV_VV_VX;
3306
3307//===----------------------------------------------------------------------===//
3308// 12.11. Vector Integer Divide Instructions
3309//===----------------------------------------------------------------------===//
3310defm PseudoVDIVU       : VPseudoBinaryV_VV_VX;
3311defm PseudoVDIV        : VPseudoBinaryV_VV_VX;
3312defm PseudoVREMU       : VPseudoBinaryV_VV_VX;
3313defm PseudoVREM        : VPseudoBinaryV_VV_VX;
3314
3315//===----------------------------------------------------------------------===//
3316// 12.12. Vector Widening Integer Multiply Instructions
3317//===----------------------------------------------------------------------===//
3318defm PseudoVWMUL       : VPseudoBinaryW_VV_VX;
3319defm PseudoVWMULU      : VPseudoBinaryW_VV_VX;
3320defm PseudoVWMULSU     : VPseudoBinaryW_VV_VX;
3321
3322//===----------------------------------------------------------------------===//
3323// 12.13. Vector Single-Width Integer Multiply-Add Instructions
3324//===----------------------------------------------------------------------===//
3325defm PseudoVMACC       : VPseudoTernaryV_VV_VX_AAXA;
3326defm PseudoVNMSAC      : VPseudoTernaryV_VV_VX_AAXA;
3327defm PseudoVMADD       : VPseudoTernaryV_VV_VX_AAXA;
3328defm PseudoVNMSUB      : VPseudoTernaryV_VV_VX_AAXA;
3329
3330//===----------------------------------------------------------------------===//
3331// 12.14. Vector Widening Integer Multiply-Add Instructions
3332//===----------------------------------------------------------------------===//
3333defm PseudoVWMACCU     : VPseudoTernaryW_VV_VX;
3334defm PseudoVWMACC      : VPseudoTernaryW_VV_VX;
3335defm PseudoVWMACCSU    : VPseudoTernaryW_VV_VX;
3336defm PseudoVWMACCUS    : VPseudoTernaryW_VX;
3337
3338//===----------------------------------------------------------------------===//
3339// 12.16. Vector Integer Merge Instructions
3340//===----------------------------------------------------------------------===//
3341defm PseudoVMERGE      : VPseudoBinaryV_VM_XM_IM;
3342
3343//===----------------------------------------------------------------------===//
3344// 12.17. Vector Integer Move Instructions
3345//===----------------------------------------------------------------------===//
3346defm PseudoVMV_V       : VPseudoUnaryV_V_X_I_NoDummyMask;
3347
3348//===----------------------------------------------------------------------===//
3349// 13.1. Vector Single-Width Saturating Add and Subtract
3350//===----------------------------------------------------------------------===//
3351let Defs = [VXSAT], hasSideEffects = 1 in {
3352  defm PseudoVSADDU      : VPseudoBinaryV_VV_VX_VI;
3353  defm PseudoVSADD       : VPseudoBinaryV_VV_VX_VI;
3354  defm PseudoVSSUBU      : VPseudoBinaryV_VV_VX;
3355  defm PseudoVSSUB       : VPseudoBinaryV_VV_VX;
3356}
3357
3358//===----------------------------------------------------------------------===//
3359// 13.2. Vector Single-Width Averaging Add and Subtract
3360//===----------------------------------------------------------------------===//
3361let Uses = [VL, VTYPE, VXRM], hasSideEffects = 1 in {
3362  defm PseudoVAADDU      : VPseudoBinaryV_VV_VX;
3363  defm PseudoVAADD       : VPseudoBinaryV_VV_VX;
3364  defm PseudoVASUBU      : VPseudoBinaryV_VV_VX;
3365  defm PseudoVASUB       : VPseudoBinaryV_VV_VX;
3366}
3367
3368//===----------------------------------------------------------------------===//
3369// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
3370//===----------------------------------------------------------------------===//
3371let Uses = [VL, VTYPE, VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
3372  defm PseudoVSMUL      : VPseudoBinaryV_VV_VX;
3373}
3374
3375//===----------------------------------------------------------------------===//
3376// 13.4. Vector Single-Width Scaling Shift Instructions
3377//===----------------------------------------------------------------------===//
3378let Uses = [VL, VTYPE, VXRM], hasSideEffects = 1 in {
3379  defm PseudoVSSRL        : VPseudoBinaryV_VV_VX_VI<uimm5>;
3380  defm PseudoVSSRA        : VPseudoBinaryV_VV_VX_VI<uimm5>;
3381}
3382
3383//===----------------------------------------------------------------------===//
3384// 13.5. Vector Narrowing Fixed-Point Clip Instructions
3385//===----------------------------------------------------------------------===//
3386let Uses = [VL, VTYPE, VXRM], Defs = [VXSAT], hasSideEffects = 1 in {
3387  defm PseudoVNCLIP     : VPseudoBinaryV_WV_WX_WI;
3388  defm PseudoVNCLIPU    : VPseudoBinaryV_WV_WX_WI;
3389}
3390
3391} // Predicates = [HasStdExtV]
3392
3393let Predicates = [HasStdExtV, HasStdExtF] in {
3394//===----------------------------------------------------------------------===//
3395// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
3396//===----------------------------------------------------------------------===//
3397defm PseudoVFADD       : VPseudoBinaryV_VV_VF;
3398defm PseudoVFSUB       : VPseudoBinaryV_VV_VF;
3399defm PseudoVFRSUB      : VPseudoBinaryV_VF;
3400
3401//===----------------------------------------------------------------------===//
3402// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
3403//===----------------------------------------------------------------------===//
3404defm PseudoVFWADD     : VPseudoBinaryW_VV_VF;
3405defm PseudoVFWSUB     : VPseudoBinaryW_VV_VF;
3406defm PseudoVFWADD     : VPseudoBinaryW_WV_WF;
3407defm PseudoVFWSUB     : VPseudoBinaryW_WV_WF;
3408
3409//===----------------------------------------------------------------------===//
3410// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
3411//===----------------------------------------------------------------------===//
3412defm PseudoVFMUL       : VPseudoBinaryV_VV_VF;
3413defm PseudoVFDIV       : VPseudoBinaryV_VV_VF;
3414defm PseudoVFRDIV      : VPseudoBinaryV_VF;
3415
3416//===----------------------------------------------------------------------===//
3417// 14.5. Vector Widening Floating-Point Multiply
3418//===----------------------------------------------------------------------===//
3419defm PseudoVFWMUL      : VPseudoBinaryW_VV_VF;
3420
3421//===----------------------------------------------------------------------===//
3422// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
3423//===----------------------------------------------------------------------===//
3424defm PseudoVFMACC      : VPseudoTernaryV_VV_VF_AAXA;
3425defm PseudoVFNMACC     : VPseudoTernaryV_VV_VF_AAXA;
3426defm PseudoVFMSAC      : VPseudoTernaryV_VV_VF_AAXA;
3427defm PseudoVFNMSAC     : VPseudoTernaryV_VV_VF_AAXA;
3428defm PseudoVFMADD      : VPseudoTernaryV_VV_VF_AAXA;
3429defm PseudoVFNMADD     : VPseudoTernaryV_VV_VF_AAXA;
3430defm PseudoVFMSUB      : VPseudoTernaryV_VV_VF_AAXA;
3431defm PseudoVFNMSUB     : VPseudoTernaryV_VV_VF_AAXA;
3432
3433//===----------------------------------------------------------------------===//
3434// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
3435//===----------------------------------------------------------------------===//
3436defm PseudoVFWMACC     : VPseudoTernaryW_VV_VF;
3437defm PseudoVFWNMACC    : VPseudoTernaryW_VV_VF;
3438defm PseudoVFWMSAC     : VPseudoTernaryW_VV_VF;
3439defm PseudoVFWNMSAC    : VPseudoTernaryW_VV_VF;
3440
3441//===----------------------------------------------------------------------===//
3442// 14.8. Vector Floating-Point Square-Root Instruction
3443//===----------------------------------------------------------------------===//
3444defm PseudoVFSQRT      : VPseudoUnaryV_V;
3445
3446//===----------------------------------------------------------------------===//
3447// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
3448//===----------------------------------------------------------------------===//
3449defm PseudoVFRSQRT7    : VPseudoUnaryV_V;
3450
3451//===----------------------------------------------------------------------===//
3452// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
3453//===----------------------------------------------------------------------===//
3454defm PseudoVFREC7      : VPseudoUnaryV_V;
3455
3456//===----------------------------------------------------------------------===//
3457// 14.11. Vector Floating-Point Min/Max Instructions
3458//===----------------------------------------------------------------------===//
3459defm PseudoVFMIN       : VPseudoBinaryV_VV_VF;
3460defm PseudoVFMAX       : VPseudoBinaryV_VV_VF;
3461
3462//===----------------------------------------------------------------------===//
3463// 14.12. Vector Floating-Point Sign-Injection Instructions
3464//===----------------------------------------------------------------------===//
3465defm PseudoVFSGNJ      : VPseudoBinaryV_VV_VF;
3466defm PseudoVFSGNJN     : VPseudoBinaryV_VV_VF;
3467defm PseudoVFSGNJX     : VPseudoBinaryV_VV_VF;
3468
3469//===----------------------------------------------------------------------===//
3470// 14.13. Vector Floating-Point Compare Instructions
3471//===----------------------------------------------------------------------===//
3472defm PseudoVMFEQ       : VPseudoBinaryM_VV_VF;
3473defm PseudoVMFNE       : VPseudoBinaryM_VV_VF;
3474defm PseudoVMFLT       : VPseudoBinaryM_VV_VF;
3475defm PseudoVMFLE       : VPseudoBinaryM_VV_VF;
3476defm PseudoVMFGT       : VPseudoBinaryM_VF;
3477defm PseudoVMFGE       : VPseudoBinaryM_VF;
3478
3479//===----------------------------------------------------------------------===//
3480// 14.14. Vector Floating-Point Classify Instruction
3481//===----------------------------------------------------------------------===//
3482defm PseudoVFCLASS     : VPseudoUnaryV_V;
3483
3484//===----------------------------------------------------------------------===//
3485// 14.15. Vector Floating-Point Merge Instruction
3486//===----------------------------------------------------------------------===//
3487defm PseudoVFMERGE     : VPseudoBinaryV_FM;
3488
3489//===----------------------------------------------------------------------===//
3490// 14.16. Vector Floating-Point Move Instruction
3491//===----------------------------------------------------------------------===//
3492defm PseudoVFMV_V      : VPseudoUnaryV_F_NoDummyMask;
3493
3494//===----------------------------------------------------------------------===//
3495// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
3496//===----------------------------------------------------------------------===//
3497defm PseudoVFCVT_XU_F : VPseudoConversionV_V;
3498defm PseudoVFCVT_X_F : VPseudoConversionV_V;
3499defm PseudoVFCVT_RTZ_XU_F : VPseudoConversionV_V;
3500defm PseudoVFCVT_RTZ_X_F : VPseudoConversionV_V;
3501defm PseudoVFCVT_F_XU : VPseudoConversionV_V;
3502defm PseudoVFCVT_F_X : VPseudoConversionV_V;
3503
3504//===----------------------------------------------------------------------===//
3505// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
3506//===----------------------------------------------------------------------===//
3507defm PseudoVFWCVT_XU_F : VPseudoConversionW_V;
3508defm PseudoVFWCVT_X_F : VPseudoConversionW_V;
3509defm PseudoVFWCVT_RTZ_XU_F : VPseudoConversionW_V;
3510defm PseudoVFWCVT_RTZ_X_F : VPseudoConversionW_V;
3511defm PseudoVFWCVT_F_XU : VPseudoConversionW_V;
3512defm PseudoVFWCVT_F_X : VPseudoConversionW_V;
3513defm PseudoVFWCVT_F_F : VPseudoConversionW_V;
3514
3515//===----------------------------------------------------------------------===//
3516// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
3517//===----------------------------------------------------------------------===//
3518defm PseudoVFNCVT_XU_F : VPseudoConversionV_W;
3519defm PseudoVFNCVT_X_F : VPseudoConversionV_W;
3520defm PseudoVFNCVT_RTZ_XU_F : VPseudoConversionV_W;
3521defm PseudoVFNCVT_RTZ_X_F : VPseudoConversionV_W;
3522defm PseudoVFNCVT_F_XU : VPseudoConversionV_W;
3523defm PseudoVFNCVT_F_X : VPseudoConversionV_W;
3524defm PseudoVFNCVT_F_F : VPseudoConversionV_W;
3525defm PseudoVFNCVT_ROD_F_F : VPseudoConversionV_W;
3526} // Predicates = [HasStdExtV, HasStdExtF]
3527
3528let Predicates = [HasStdExtV] in {
3529//===----------------------------------------------------------------------===//
3530// 15.1. Vector Single-Width Integer Reduction Instructions
3531//===----------------------------------------------------------------------===//
3532defm PseudoVREDSUM     : VPseudoReductionV_VS;
3533defm PseudoVREDAND     : VPseudoReductionV_VS;
3534defm PseudoVREDOR      : VPseudoReductionV_VS;
3535defm PseudoVREDXOR     : VPseudoReductionV_VS;
3536defm PseudoVREDMINU    : VPseudoReductionV_VS;
3537defm PseudoVREDMIN     : VPseudoReductionV_VS;
3538defm PseudoVREDMAXU    : VPseudoReductionV_VS;
3539defm PseudoVREDMAX     : VPseudoReductionV_VS;
3540
3541//===----------------------------------------------------------------------===//
3542// 15.2. Vector Widening Integer Reduction Instructions
3543//===----------------------------------------------------------------------===//
3544defm PseudoVWREDSUMU   : VPseudoReductionV_VS;
3545defm PseudoVWREDSUM    : VPseudoReductionV_VS;
3546} // Predicates = [HasStdExtV]
3547
3548let Predicates = [HasStdExtV, HasStdExtF] in {
3549//===----------------------------------------------------------------------===//
3550// 15.3. Vector Single-Width Floating-Point Reduction Instructions
3551//===----------------------------------------------------------------------===//
3552defm PseudoVFREDOSUM   : VPseudoReductionV_VS;
3553defm PseudoVFREDSUM    : VPseudoReductionV_VS;
3554defm PseudoVFREDMIN    : VPseudoReductionV_VS;
3555defm PseudoVFREDMAX    : VPseudoReductionV_VS;
3556
3557//===----------------------------------------------------------------------===//
3558// 15.4. Vector Widening Floating-Point Reduction Instructions
3559//===----------------------------------------------------------------------===//
3560defm PseudoVFWREDSUM   : VPseudoReductionV_VS;
3561defm PseudoVFWREDOSUM  : VPseudoReductionV_VS;
3562
3563} // Predicates = [HasStdExtV, HasStdExtF]
3564
3565//===----------------------------------------------------------------------===//
3566// 16. Vector Mask Instructions
3567//===----------------------------------------------------------------------===//
3568
3569//===----------------------------------------------------------------------===//
3570// 16.1 Vector Mask-Register Logical Instructions
3571//===----------------------------------------------------------------------===//
3572
3573defm PseudoVMAND: VPseudoBinaryM_MM;
3574defm PseudoVMNAND: VPseudoBinaryM_MM;
3575defm PseudoVMANDNOT: VPseudoBinaryM_MM;
3576defm PseudoVMXOR: VPseudoBinaryM_MM;
3577defm PseudoVMOR: VPseudoBinaryM_MM;
3578defm PseudoVMNOR: VPseudoBinaryM_MM;
3579defm PseudoVMORNOT: VPseudoBinaryM_MM;
3580defm PseudoVMXNOR: VPseudoBinaryM_MM;
3581
3582// Pseudo insturctions
3583defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">;
3584defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;
3585
3586//===----------------------------------------------------------------------===//
3587// 16.2. Vector mask population count vpopc
3588//===----------------------------------------------------------------------===//
3589
3590defm PseudoVPOPC: VPseudoUnaryS_M;
3591
3592//===----------------------------------------------------------------------===//
3593// 16.3. vfirst find-first-set mask bit
3594//===----------------------------------------------------------------------===//
3595
3596defm PseudoVFIRST: VPseudoUnaryS_M;
3597
3598//===----------------------------------------------------------------------===//
3599// 16.4. vmsbf.m set-before-first mask bit
3600//===----------------------------------------------------------------------===//
3601defm PseudoVMSBF: VPseudoUnaryM_M;
3602
3603//===----------------------------------------------------------------------===//
3604// 16.5. vmsif.m set-including-first mask bit
3605//===----------------------------------------------------------------------===//
3606defm PseudoVMSIF: VPseudoUnaryM_M;
3607
3608//===----------------------------------------------------------------------===//
3609// 16.6. vmsof.m set-only-first mask bit
3610//===----------------------------------------------------------------------===//
3611defm PseudoVMSOF: VPseudoUnaryM_M;
3612
3613//===----------------------------------------------------------------------===//
3614// 16.8.  Vector Iota Instruction
3615//===----------------------------------------------------------------------===//
3616defm PseudoVIOTA_M: VPseudoUnaryV_M;
3617
3618//===----------------------------------------------------------------------===//
3619// 16.9. Vector Element Index Instruction
3620//===----------------------------------------------------------------------===//
3621defm PseudoVID : VPseudoMaskNullaryV;
3622
3623//===----------------------------------------------------------------------===//
3624// 17. Vector Permutation Instructions
3625//===----------------------------------------------------------------------===//
3626
3627//===----------------------------------------------------------------------===//
3628// 17.1. Integer Scalar Move Instructions
3629//===----------------------------------------------------------------------===//
3630
let Predicates = [HasStdExtV] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
    Uses = [VL, VTYPE] in {
  foreach m = MxList.m in {
    let VLMul = m.value in {
      // vmv.x.s: read element 0 into a GPR. Takes only a SEW operand (no VL
      // operand), since it reads a single element.
      let HasSEWOp = 1, BaseInstr = VMV_X_S in
      def PseudoVMV_X_S # "_" # m.MX: Pseudo<(outs GPR:$rd),
                                             (ins m.vrclass:$rs2, ixlenimm:$sew),
                                             []>, RISCVVPseudo;
      // vmv.s.x: write a GPR into element 0. The tie constraint $rd = $rs1
      // preserves the rest of the destination group; WritesElement0 marks the
      // partial write for downstream analyses.
      let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X, WritesElement0 = 1,
          Constraints = "$rd = $rs1" in
      def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
                                             (ins m.vrclass:$rs1, GPR:$rs2,
                                                  GPR:$vl, ixlenimm:$sew),
                                             []>, RISCVVPseudo;
    }
  }
}
} // Predicates = [HasStdExtV]
3650
3651//===----------------------------------------------------------------------===//
3652// 17.2. Floating-Point Scalar Move Instructions
3653//===----------------------------------------------------------------------===//
3654
let Predicates = [HasStdExtV, HasStdExtF] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
    Uses = [VL, VTYPE] in {
  foreach m = MxList.m in {
    // Unlike the integer scalar moves, these also expand over FPList.fpinfo
    // to cover each supported FP register class / element width pairing.
    foreach f = FPList.fpinfo in {
      let VLMul = m.value in {
        // vfmv.f.s: read element 0 into an FPR (SEW operand only, no VL).
        let HasSEWOp = 1, BaseInstr = VFMV_F_S in
        def "PseudoVFMV_" # f.FX # "_S_" # m.MX :
                                          Pseudo<(outs f.fprclass:$rd),
                                                 (ins m.vrclass:$rs2,
                                                      ixlenimm:$sew),
                                                 []>, RISCVVPseudo;
        // vfmv.s.f: write an FPR into element 0; $rd = $rs1 keeps the rest of
        // the destination register group intact.
        let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, WritesElement0 = 1,
            Constraints = "$rd = $rs1" in
        def "PseudoVFMV_S_" # f.FX # "_" # m.MX :
                                          Pseudo<(outs m.vrclass:$rd),
                                                 (ins m.vrclass:$rs1, f.fprclass:$rs2,
                                                      GPR:$vl, ixlenimm:$sew),
                                                 []>, RISCVVPseudo;
      }
    }
  }
}
} // Predicates = [HasStdExtV, HasStdExtF]
3679
3680//===----------------------------------------------------------------------===//
3681// 17.3. Vector Slide Instructions
3682//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
  // Slide-up destinations overlap their source when the slide amount is less
  // than VL, hence @earlyclobber on the up variants only.
  defm PseudoVSLIDEUP    : VPseudoTernaryV_VX_VI<uimm5, "@earlyclobber $rd">;
  defm PseudoVSLIDEDOWN  : VPseudoTernaryV_VX_VI<uimm5>;
  defm PseudoVSLIDE1UP   : VPseudoBinaryV_VX<"@earlyclobber $rd">;
  defm PseudoVSLIDE1DOWN : VPseudoBinaryV_VX;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
  defm PseudoVFSLIDE1UP  : VPseudoBinaryV_VF<"@earlyclobber $rd">;
  defm PseudoVFSLIDE1DOWN : VPseudoBinaryV_VF;
} // Predicates = [HasStdExtV, HasStdExtF]
3694
3695//===----------------------------------------------------------------------===//
3696// 17.4. Vector Register Gather Instructions
3697//===----------------------------------------------------------------------===//
3698defm PseudoVRGATHER    : VPseudoBinaryV_VV_VX_VI<uimm5, "@earlyclobber $rd">;
3699defm PseudoVRGATHEREI16 : VPseudoBinaryV_VV_EEW</* eew */ 16, "@earlyclobber $rd">;
3700
3701//===----------------------------------------------------------------------===//
3702// 17.5. Vector Compress Instruction
3703//===----------------------------------------------------------------------===//
3704defm PseudoVCOMPRESS : VPseudoUnaryV_V_AnyMask;
3705
3706//===----------------------------------------------------------------------===//
3707// Patterns.
3708//===----------------------------------------------------------------------===//
3709let Predicates = [HasStdExtV] in {
3710
3711//===----------------------------------------------------------------------===//
3712// 7. Vector Loads and Stores
3713//===----------------------------------------------------------------------===//
3714
3715//===----------------------------------------------------------------------===//
3716// 7.4 Vector Unit-Stride Instructions
3717//===----------------------------------------------------------------------===//
3718
// Select the vle/vse/vleff intrinsics onto the unit-stride pseudos, one set
// of patterns per vector type in AllVectors.
foreach vti = AllVectors in
{
  defm : VPatUSLoad<"int_riscv_vle",
                    "PseudoVLE" # vti.SEW,
                    vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
  defm : VPatUSLoadFF<"PseudoVLE" # vti.SEW # "FF",
                      vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
  defm : VPatUSStore<"int_riscv_vse",
                     "PseudoVSE" # vti.SEW,
                     vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
}

// Mask load/store intrinsics (vle1/vse1) are matched directly here rather
// than via the VPat* helpers; BX selects the per-LMUL pseudo suffix.
foreach vti = AllMasks in {
  defvar PseudoVLE1 = !cast<Instruction>("PseudoVLE1_V_"#vti.BX);
  def : Pat<(vti.Mask (int_riscv_vle1 GPR:$rs1, (XLenVT (VLOp GPR:$vl)))),
            (PseudoVLE1 $rs1, GPR:$vl, vti.SEW)>;
  defvar PseudoVSE1 = !cast<Instruction>("PseudoVSE1_V_"#vti.BX);
  def : Pat<(int_riscv_vse1 (vti.Mask VR:$rs3), GPR:$rs1, (XLenVT (VLOp GPR:$vl))),
            (PseudoVSE1 $rs3, $rs1, GPR:$vl, vti.SEW)>;
}
3739
3740//===----------------------------------------------------------------------===//
3741// 7.5 Vector Strided Instructions
3742//===----------------------------------------------------------------------===//
3743
// Select the vlse/vsse intrinsics onto the strided pseudos.
foreach vti = AllVectors in
{
  defm : VPatSLoad<"int_riscv_vlse",
                   "PseudoVLSE" # vti.SEW,
                   vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
  defm : VPatSStore<"int_riscv_vsse",
                    "PseudoVSSE" # vti.SEW,
                    vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
}
3753
3754//===----------------------------------------------------------------------===//
3755// 7.6 Vector Indexed Instructions
3756//===----------------------------------------------------------------------===//
3757
// Select the indexed load/store intrinsics. For each (data type, index EEW)
// pair we must compute the index operand's EMUL and only emit patterns when
// that EMUL is legal.
foreach vti = AllVectors in
foreach eew = EEWList in {
  defvar vlmul = vti.LMul;
  // LMUL is tracked scaled by 8 ("octuple") so fractional LMULs stay integral.
  defvar octuple_lmul = octuple_from_str<vti.LMul.MX>.ret;
  defvar log_sew = shift_amount<vti.SEW>.val;
  // The data vector register group has EEW=SEW, EMUL=LMUL, while the offset
  // vector register group has EEW encoding in the instruction and EMUL=(EEW/SEW)*LMUL.
  // Calculate the octuple EMUL, which is (eew * octuple_lmul) >> log_sew.
  defvar octuple_elmul = !srl(!mul(eew, octuple_lmul), log_sew);
  // A legal octuple EMUL must be greater than 0 and less than or equal to 64
  // (i.e. MF8 <= EMUL <= LMUL 8).
  if !gt(octuple_elmul, 0) then {
    if !le(octuple_elmul, 64) then {
       defvar elmul_str = octuple_to_str<octuple_elmul>.ret;
       defvar elmul =!cast<LMULInfo>("V_" # elmul_str);
       // Look up the integer vector type matching the index EEW and EMUL.
       defvar idx_vti = !cast<VTypeInfo>("VI" # eew # elmul_str);

       defm : VPatILoad<"int_riscv_vluxei",
                        "PseudoVLUXEI"#eew,
                         vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
                         vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
       defm : VPatILoad<"int_riscv_vloxei",
                        "PseudoVLOXEI"#eew,
                         vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
                         vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
       defm : VPatIStore<"int_riscv_vsoxei",
                          "PseudoVSOXEI"#eew,
                          vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
                          vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
       defm : VPatIStore<"int_riscv_vsuxei",
                          "PseudoVSUXEI"#eew,
                          vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
                          vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
    }
  }
}
3793} // Predicates = [HasStdExtV]
3794
3795//===----------------------------------------------------------------------===//
3796// 8. Vector AMO Operations
3797//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtZvamo] in {
  // Select the vector AMO intrinsics for every integer element type.
  // VPatAMOV_WD is defined earlier in this file; it is assumed to instantiate
  // both data-width forms of each AMO pseudo -- confirm at its definition.
  defm "" : VPatAMOV_WD<"int_riscv_vamoswap", "PseudoVAMOSWAP", AllIntegerVectors>;
  defm "" : VPatAMOV_WD<"int_riscv_vamoadd", "PseudoVAMOADD", AllIntegerVectors>;
  defm "" : VPatAMOV_WD<"int_riscv_vamoxor", "PseudoVAMOXOR", AllIntegerVectors>;
  defm "" : VPatAMOV_WD<"int_riscv_vamoand", "PseudoVAMOAND", AllIntegerVectors>;
  defm "" : VPatAMOV_WD<"int_riscv_vamoor", "PseudoVAMOOR", AllIntegerVectors>;
  defm "" : VPatAMOV_WD<"int_riscv_vamomin", "PseudoVAMOMIN", AllIntegerVectors>;
  defm "" : VPatAMOV_WD<"int_riscv_vamomax", "PseudoVAMOMAX", AllIntegerVectors>;
  defm "" : VPatAMOV_WD<"int_riscv_vamominu", "PseudoVAMOMINU", AllIntegerVectors>;
  defm "" : VPatAMOV_WD<"int_riscv_vamomaxu", "PseudoVAMOMAXU", AllIntegerVectors>;
} // Predicates = [HasStdExtZvamo]
3809
let Predicates = [HasStdExtZvamo, HasStdExtF] in {
  // Of the AMO operations, only vamoswap is instantiated for floating-point
  // vectors here; the arithmetic/logical AMOs above are integer-only.
  defm "" : VPatAMOV_WD<"int_riscv_vamoswap", "PseudoVAMOSWAP", AllFloatVectors>;
} // Predicates = [HasStdExtZvamo, HasStdExtF]
3813
3814//===----------------------------------------------------------------------===//
3815// 12. Vector Integer Arithmetic Instructions
3816//===----------------------------------------------------------------------===//
3817
let Predicates = [HasStdExtV] in {
//===----------------------------------------------------------------------===//
// 12.1. Vector Single-Width Integer Add and Subtract
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
// vrsub deliberately has no .vv form (only VX/VI); the vector-vector case is
// covered by vsub.
defm "" : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
// The *_w intrinsics (WV/WX forms) take an already-widened first source
// operand; the plain forms (VV/VX) widen both sources.
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.3. Vector Integer Extension
//===----------------------------------------------------------------------===//
// Each extension factor (VF2/VF4/VF8) is instantiated over the vector types
// that can legally be extended by that factor.
defm "" : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF2",
                        AllFractionableVF2IntVectors>;
defm "" : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF4",
                        AllFractionableVF4IntVectors>;
defm "" : VPatUnaryV_VF<"int_riscv_vzext", "PseudoVZEXT", "VF8",
                        AllFractionableVF8IntVectors>;
defm "" : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF2",
                        AllFractionableVF2IntVectors>;
defm "" : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF4",
                        AllFractionableVF4IntVectors>;
defm "" : VPatUnaryV_VF<"int_riscv_vsext", "PseudoVSEXT", "VF8",
                        AllFractionableVF8IntVectors>;

//===----------------------------------------------------------------------===//
// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
//===----------------------------------------------------------------------===//
// vmadc/vmsbc come in a carry/borrow-in form (the *_carry_in/*_borrow_in
// intrinsics) and a plain form without the carry operand.
defm "" : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">;
defm "" : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
defm "" : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">;

defm "" : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
defm "" : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
defm "" : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;

//===----------------------------------------------------------------------===//
// 12.5. Vector Bitwise Logical Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vand", "PseudoVAND", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vor", "PseudoVOR", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vxor", "PseudoVXOR", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.6. Vector Single-Width Bit Shift Instructions
//===----------------------------------------------------------------------===//
// The immediate form takes an unsigned 5-bit shift amount, hence the explicit
// uimm5 operand class (the default immediate class is signed).
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors,
                               uimm5>;
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
                               uimm5>;
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
                               uimm5>;

//===----------------------------------------------------------------------===//
// 12.7. Vector Narrowing Integer Right Shift Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>;
defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.8. Vector Integer Comparison Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryM_VV_VX_VI<"int_riscv_vmseq", "PseudoVMSEQ", AllIntegerVectors>;
defm "" : VPatBinaryM_VV_VX_VI<"int_riscv_vmsne", "PseudoVMSNE", AllIntegerVectors>;
// vmslt(u) has no .vi form; see the foreach below for the immediate rewrite.
defm "" : VPatBinaryM_VV_VX<"int_riscv_vmsltu", "PseudoVMSLTU", AllIntegerVectors>;
defm "" : VPatBinaryM_VV_VX<"int_riscv_vmslt", "PseudoVMSLT", AllIntegerVectors>;
defm "" : VPatBinaryM_VV_VX_VI<"int_riscv_vmsleu", "PseudoVMSLEU", AllIntegerVectors>;
defm "" : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVectors>;

defm "" : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
defm "" : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;

// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16. This
// avoids the user needing to know that there is no vmslt(u).vi instruction.
// This is limited to vmslt(u).vx as there is no vmsge().vx intrinsic or
// instruction.
foreach vti = AllIntegerVectors in {
  // Signed case: rewrite (x < imm) as (x <= imm - 1). DecImm is expected to
  // emit the immediate minus one -- confirm at its definition.
  def : Pat<(vti.Mask (int_riscv_vmslt (vti.Vector vti.RegClass:$rs1),
                                       (vti.Scalar simm5_plus1:$rs2), (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               (DecImm simm5_plus1:$rs2),
                                                               GPR:$vl,
                                                               vti.SEW)>;
  // Masked variant of the signed rewrite; passes the merge operand and mask
  // (in V0) through to the _MASK pseudo.
  def : Pat<(vti.Mask (int_riscv_vmslt_mask (vti.Mask VR:$merge),
                                            (vti.Vector vti.RegClass:$rs1),
                                            (vti.Scalar simm5_plus1:$rs2),
                                            (vti.Mask V0),
                                            (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX#"_MASK")
                                                      VR:$merge,
                                                      vti.RegClass:$rs1,
                                                      (DecImm simm5_plus1:$rs2),
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.SEW)>;

  // Unsigned case: same (x < imm) -> (x <= imm - 1) rewrite using vmsleu.
  def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar simm5_plus1:$rs2),
                                        (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                                (DecImm simm5_plus1:$rs2),
                                                                GPR:$vl,
                                                                vti.SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (vti.Scalar simm5_plus1:$rs2),
                                             (vti.Mask V0),
                                             (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX#"_MASK")
                                                      VR:$merge,
                                                      vti.RegClass:$rs1,
                                                      (DecImm simm5_plus1:$rs2),
                                                      (vti.Mask V0),
                                                      GPR:$vl,
                                                      vti.SEW)>;

  // Special cases to avoid matching vmsltu.vi 0 (always false) to
  // vmsleu.vi -1 (always true). Instead match to vmsne.vv.
  def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar 0), (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                               vti.RegClass:$rs1,
                                                               GPR:$vl,
                                                               vti.SEW)>;
  def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (vti.Scalar 0),
                                             (vti.Mask V0),
                                             (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX#"_MASK")
                                                     VR:$merge,
                                                     vti.RegClass:$rs1,
                                                     vti.RegClass:$rs1,
                                                     (vti.Mask V0),
                                                     GPR:$vl,
                                                     vti.SEW)>;
}

//===----------------------------------------------------------------------===//
// 12.9. Vector Integer Min/Max Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX<"int_riscv_vminu", "PseudoVMINU", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vmin", "PseudoVMIN", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.10. Vector Single-Width Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.11. Vector Integer Divide Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.12. Vector Widening Integer Multiply Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.13. Vector Single-Width Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmadd", "PseudoVMADD", AllIntegerVectors>;
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllIntegerVectors>;
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 12.14. Vector Widening Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
// Note: vwmaccus only has a VX form (scalar unsigned * vector signed).
defm "" : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>;
defm "" : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>;
defm "" : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>;
defm "" : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>;

//===----------------------------------------------------------------------===//
// 12.16. Vector Integer Merge Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VM_XM_IM<"int_riscv_vmerge", "PseudoVMERGE">;

//===----------------------------------------------------------------------===//
// 12.17. Vector Integer Move Instructions
//===----------------------------------------------------------------------===//
// vmv.v.v copies a vector of any element type, so iterate over AllVectors.
foreach vti = AllVectors in {
  def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$rs1),
                                           (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
             $rs1, GPR:$vl, vti.SEW)>;
}

// Integer splats: a GPR source selects vmv.v.x, a simm5 immediate selects
// vmv.v.i.
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (int_riscv_vmv_v_x GPR:$rs2, (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
             $rs2, GPR:$vl, vti.SEW)>;
  def : Pat<(vti.Vector (int_riscv_vmv_v_x simm5:$imm5, (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
             simm5:$imm5, GPR:$vl, vti.SEW)>;
}

//===----------------------------------------------------------------------===//
// 13.1. Vector Single-Width Saturating Add and Subtract
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.2. Vector Single-Width Averaging Add and Subtract
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX<"int_riscv_vaaddu", "PseudoVAADDU", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vaadd", "PseudoVAADD", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vasubu", "PseudoVASUBU", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vasub", "PseudoVASUB", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX<"int_riscv_vsmul", "PseudoVSMUL", AllIntegerVectors>;

//===----------------------------------------------------------------------===//
// 13.4. Vector Single-Width Scaling Shift Instructions
//===----------------------------------------------------------------------===//
// Like 12.6, the immediate shift amount is unsigned (uimm5).
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vssrl", "PseudoVSSRL", AllIntegerVectors,
                               uimm5>;
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vssra", "PseudoVSSRA", AllIntegerVectors,
                               uimm5>;

//===----------------------------------------------------------------------===//
// 13.5. Vector Narrowing Fixed-Point Clip Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnclipu", "PseudoVNCLIPU", AllWidenableIntVectors>;
defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnclip", "PseudoVNCLIP", AllWidenableIntVectors>;

} // Predicates = [HasStdExtV]
4074
let Predicates = [HasStdExtV, HasStdExtF] in {
//===----------------------------------------------------------------------===//
// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>;
// vfrsub only exists in scalar-reversed (VX) form.
defm "" : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwadd", "PseudoVFWADD", AllWidenableFloatVectors>;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwsub", "PseudoVFWSUB", AllWidenableFloatVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vfwadd_w", "PseudoVFWADD", AllWidenableFloatVectors>;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vfwsub_w", "PseudoVFWSUB", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfmul", "PseudoVFMUL", AllFloatVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfdiv", "PseudoVFDIV", AllFloatVectors>;
defm "" : VPatBinaryV_VX<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.5. Vector Widening Floating-Point Multiply
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmacc", "PseudoVFMACC", AllFloatVectors>;
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmacc", "PseudoVFNMACC", AllFloatVectors>;
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsac", "PseudoVFMSAC", AllFloatVectors>;
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsac", "PseudoVFNMSAC", AllFloatVectors>;
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmadd", "PseudoVFMADD", AllFloatVectors>;
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmadd", "PseudoVFNMADD", AllFloatVectors>;
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsub", "PseudoVFMSUB", AllFloatVectors>;
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsub", "PseudoVFNMSUB", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatTernaryW_VV_VX<"int_riscv_vfwmacc", "PseudoVFWMACC", AllWidenableFloatVectors>;
defm "" : VPatTernaryW_VV_VX<"int_riscv_vfwnmacc", "PseudoVFWNMACC", AllWidenableFloatVectors>;
defm "" : VPatTernaryW_VV_VX<"int_riscv_vfwmsac", "PseudoVFWMSAC", AllWidenableFloatVectors>;
defm "" : VPatTernaryW_VV_VX<"int_riscv_vfwnmsac", "PseudoVFWNMSAC", AllWidenableFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.8. Vector Floating-Point Square-Root Instruction
//===----------------------------------------------------------------------===//
defm "" : VPatUnaryV_V<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
//===----------------------------------------------------------------------===//
defm "" : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
//===----------------------------------------------------------------------===//
defm "" : VPatUnaryV_V<"int_riscv_vfrec7", "PseudoVFREC7", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.11. Vector Floating-Point Min/Max Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.12. Vector Floating-Point Sign-Injection Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.13. Vector Floating-Point Compare Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ", AllFloatVectors>;
defm "" : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE", AllFloatVectors>;
defm "" : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT", AllFloatVectors>;
defm "" : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
// vmfgt/vmfge only have scalar (VX) forms; the vector-vector cases are
// covered by vmflt/vmfle with swapped operands.
defm "" : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
defm "" : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;

//===----------------------------------------------------------------------===//
// 14.14. Vector Floating-Point Classify Instruction
//===----------------------------------------------------------------------===//
defm "" : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;

//===----------------------------------------------------------------------===//
// 14.15. Vector Floating-Point Merge Instruction
//===----------------------------------------------------------------------===//
// We can use vmerge.vvm to support vector-vector vfmerge.
defm "" : VPatBinaryV_VM<"int_riscv_vfmerge", "PseudoVMERGE",
                         /*CarryOut=*/0, /*vtilist=*/AllFloatVectors>;
defm "" : VPatBinaryV_XM<"int_riscv_vfmerge", "PseudoVFMERGE",
                         /*CarryOut=*/0, /*vtilist=*/AllFloatVectors>;

// Merging in a +0.0 splat can use the integer vmerge.vim with immediate 0,
// since +0.0 has an all-zeros bit pattern.
foreach fvti = AllFloatVectors in {
  defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
  def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$rs2),
                                            (fvti.Scalar (fpimm0)),
                                            (fvti.Mask V0), (XLenVT (VLOp GPR:$vl)))),
            (instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.SEW)>;
}

//===----------------------------------------------------------------------===//
// 14.16. Vector Floating-Point Move Instruction
//===----------------------------------------------------------------------===//
foreach fvti = AllFloatVectors in {
  // If we're splatting fpimm0, use vmv.v.x vd, x0.
  def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
                         (fvti.Scalar (fpimm0)), (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
             0, GPR:$vl, fvti.SEW)>;

  // General FP splat from a scalar FP register.
  def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
                         (fvti.Scalar fvti.ScalarRegClass:$rs2), (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
                                fvti.LMul.MX)
             (fvti.Scalar fvti.ScalarRegClass:$rs2),
             GPR:$vl, fvti.SEW)>;
}

//===----------------------------------------------------------------------===//
// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatConversionVI_VF<"int_riscv_vfcvt_xu_f_v", "PseudoVFCVT_XU_F">;
defm "" : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_xu_f_v", "PseudoVFCVT_RTZ_XU_F">;
defm "" : VPatConversionVI_VF<"int_riscv_vfcvt_x_f_v", "PseudoVFCVT_X_F">;
defm "" : VPatConversionVI_VF<"int_riscv_vfcvt_rtz_x_f_v", "PseudoVFCVT_RTZ_X_F">;
defm "" : VPatConversionVF_VI<"int_riscv_vfcvt_f_x_v", "PseudoVFCVT_F_X">;
defm "" : VPatConversionVF_VI<"int_riscv_vfcvt_f_xu_v", "PseudoVFCVT_F_XU">;

//===----------------------------------------------------------------------===//
// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatConversionWI_VF<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_XU_F">;
defm "" : VPatConversionWI_VF<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_X_F">;
defm "" : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">;
defm "" : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
defm "" : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU">;
defm "" : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X">;
defm "" : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F">;

//===----------------------------------------------------------------------===//
// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatConversionVI_WF<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F">;
defm "" : VPatConversionVI_WF<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F">;
defm "" : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
defm "" : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
defm "" : VPatConversionVF_WI<"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">;
defm "" : VPatConversionVF_WI<"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">;
defm "" : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">;
defm "" : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">;
} // Predicates = [HasStdExtV, HasStdExtF]
4234
let Predicates = [HasStdExtV] in {
//===----------------------------------------------------------------------===//
// 15.1. Vector Single-Width Integer Reduction Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatReductionV_VS<"int_riscv_vredsum", "PseudoVREDSUM">;
defm "" : VPatReductionV_VS<"int_riscv_vredand", "PseudoVREDAND">;
defm "" : VPatReductionV_VS<"int_riscv_vredor", "PseudoVREDOR">;
defm "" : VPatReductionV_VS<"int_riscv_vredxor", "PseudoVREDXOR">;
defm "" : VPatReductionV_VS<"int_riscv_vredminu", "PseudoVREDMINU">;
defm "" : VPatReductionV_VS<"int_riscv_vredmin", "PseudoVREDMIN">;
defm "" : VPatReductionV_VS<"int_riscv_vredmaxu", "PseudoVREDMAXU">;
defm "" : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">;

//===----------------------------------------------------------------------===//
// 15.2. Vector Widening Integer Reduction Instructions
//===----------------------------------------------------------------------===//
// Widening reductions accumulate into a result whose SEW is twice the
// source SEW, hence the separate W_VS multiclass.
defm "" : VPatReductionW_VS<"int_riscv_vwredsumu", "PseudoVWREDSUMU">;
defm "" : VPatReductionW_VS<"int_riscv_vwredsum", "PseudoVWREDSUM">;
} // Predicates = [HasStdExtV]
4254
let Predicates = [HasStdExtV, HasStdExtF] in {
//===----------------------------------------------------------------------===//
// 15.3. Vector Single-Width Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
// vfredosum is the ordered sum; vfredsum allows unordered accumulation.
// IsFloat=1 selects the FP vector type list inside the multiclass.
defm "" : VPatReductionV_VS<"int_riscv_vfredosum", "PseudoVFREDOSUM", /*IsFloat=*/1>;
defm "" : VPatReductionV_VS<"int_riscv_vfredsum", "PseudoVFREDSUM", /*IsFloat=*/1>;
defm "" : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", /*IsFloat=*/1>;
defm "" : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/1>;

//===----------------------------------------------------------------------===//
// 15.4. Vector Widening Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatReductionW_VS<"int_riscv_vfwredsum", "PseudoVFWREDSUM", /*IsFloat=*/1>;
defm "" : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>;

} // Predicates = [HasStdExtV, HasStdExtF]
4271
4272//===----------------------------------------------------------------------===//
4273// 16. Vector Mask Instructions
4274//===----------------------------------------------------------------------===//
4275
let Predicates = [HasStdExtV] in {
//===----------------------------------------------------------------------===//
// 16.1 Vector Mask-Register Logical Instructions
//===----------------------------------------------------------------------===//
// Mask-register logicals: VPatBinaryM_MM maps each mask-logical intrinsic
// onto its mask-to-mask (.mm) pseudo.
defm "" : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
defm "" : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
defm "" : VPatBinaryM_MM<"int_riscv_vmandnot", "PseudoVMANDNOT">;
defm "" : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
defm "" : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
defm "" : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
defm "" : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">;
defm "" : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;

// vmclr.m / vmset.m: nullary mask operations (no vector source operands),
// hence the dedicated VPatNullaryM multiclass.
defm "" : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
defm "" : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;

//===----------------------------------------------------------------------===//
// 16.2. Vector mask population count vpopc
//===----------------------------------------------------------------------===//
// UnaryS_M: mask source, scalar (GPR) result.
defm "" : VPatUnaryS_M<"int_riscv_vpopc", "PseudoVPOPC">;

//===----------------------------------------------------------------------===//
// 16.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//
defm "" : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">;

//===----------------------------------------------------------------------===//
// 16.4. vmsbf.m set-before-first mask bit
//===----------------------------------------------------------------------===//
// UnaryM_M: mask source, mask result.
defm "" : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">;

//===----------------------------------------------------------------------===//
// 16.5. vmsif.m set-including-first mask bit
//===----------------------------------------------------------------------===//
defm "" : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">;

//===----------------------------------------------------------------------===//
// 16.6. vmsof.m set-only-first mask bit
//===----------------------------------------------------------------------===//
defm "" : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">;

//===----------------------------------------------------------------------===//
// 16.8. Vector Iota Instruction
//===----------------------------------------------------------------------===//
// UnaryV_M: mask source, full vector result.
defm "" : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">;

//===----------------------------------------------------------------------===//
// 16.9. Vector Element Index Instruction
//===----------------------------------------------------------------------===//
// vid.v has no source operands, only a mask/VL, hence VPatNullaryV.
defm "" : VPatNullaryV<"int_riscv_vid", "PseudoVID">;

} // Predicates = [HasStdExtV]
4329
4330//===----------------------------------------------------------------------===//
4331// 17. Vector Permutation Instructions
4332//===----------------------------------------------------------------------===//
4333
4334//===----------------------------------------------------------------------===//
4335// 17.1. Integer Scalar Move Instructions
4336//===----------------------------------------------------------------------===//
4337
let Predicates = [HasStdExtV] in {
// Integer scalar moves for every integer vector type.
// vmv.x.s is selected from the RISCVISD::VMV_X_S node (declared above) and
// carries no VL operand — only SEW is passed to the pseudo.
// vmv.s.x is selected directly from its intrinsic and does carry a VL operand
// (plus the tied-in destination vector $rs1).
foreach vti = AllIntegerVectors in {
  def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
            (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.SEW)>;
  def : Pat<(vti.Vector (int_riscv_vmv_s_x (vti.Vector vti.RegClass:$rs1),
                                           GPR:$rs2, (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVMV_S_X_" # vti.LMul.MX)
             (vti.Vector $rs1), $rs2, GPR:$vl, vti.SEW)>;
}
} // Predicates = [HasStdExtV]
4348
4349//===----------------------------------------------------------------------===//
4350// 17.2. Floating-Point Scalar Move Instructions
4351//===----------------------------------------------------------------------===//
4352
let Predicates = [HasStdExtV, HasStdExtF] in {
// Floating-point scalar moves for every FP vector type.  The pseudo opcode
// name is built from the scalar register suffix (fvti.ScalarSuffix) and the
// LMUL, e.g. PseudoVFMV_F_S_* / PseudoVFMV_S_F_*.
// As with the integer forms: vfmv.f.s takes no VL operand (SEW only), while
// vfmv.s.f takes VL and the tied-in destination vector $rs1.
foreach fvti = AllFloatVectors in {
  defvar instr = !cast<Instruction>("PseudoVFMV_"#fvti.ScalarSuffix#"_S_" #
                                    fvti.LMul.MX);
  def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
                         (instr $rs2, fvti.SEW)>;

  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
                         (fvti.Scalar fvti.ScalarRegClass:$rs2), (XLenVT (VLOp GPR:$vl)))),
            (!cast<Instruction>("PseudoVFMV_S_"#fvti.ScalarSuffix#"_" #
                                fvti.LMul.MX)
             (fvti.Vector $rs1),
             (fvti.Scalar fvti.ScalarRegClass:$rs2),
             GPR:$vl, fvti.SEW)>;
}
} // Predicates = [HasStdExtV, HasStdExtF]
4369
4370//===----------------------------------------------------------------------===//
4371// 17.3. Vector Slide Instructions
4372//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
  // vslideup/vslidedown use the ternary multiclass (tied destination operand)
  // and support both register (VX) and 5-bit immediate (VI) offsets.
  // vslide1up/vslide1down only have the scalar-register (VX) form.
  defm "" : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
  defm "" : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
  defm "" : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
  defm "" : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
} // Predicates = [HasStdExtV]
4379
let Predicates = [HasStdExtV, HasStdExtF] in {
  // FP vectors reuse the integer vslideup/vslidedown intrinsics; only the
  // element-insert variants have FP-specific intrinsics (vfslide1up/down),
  // which take the scalar in an FP register.
  defm "" : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
  defm "" : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
  defm "" : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
  defm "" : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
} // Predicates = [HasStdExtV, HasStdExtF]
4386
4387//===----------------------------------------------------------------------===//
4388// 17.4. Vector Register Gather Instructions
4389//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
  // vrgather supports vector (VV), scalar (VX) and 5-bit immediate (VI) index
  // operands.  vrgatherei16 only has the VV form, with the index vector fixed
  // at EEW=16 regardless of the data element width.
  defm "" : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                     AllIntegerVectors, uimm5>;
  defm "" : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16", "PseudoVRGATHEREI16",
                                   /* eew */ 16, AllIntegerVectors>;
} // Predicates = [HasStdExtV]
4396
let Predicates = [HasStdExtV, HasStdExtF] in {
  // Same gather patterns instantiated for FP vector types; the index operand
  // remains an integer vector/scalar/immediate.
  defm "" : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                     AllFloatVectors, uimm5>;
  defm "" : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16", "PseudoVRGATHEREI16",
                                   /* eew */ 16, AllFloatVectors>;
} // Predicates = [HasStdExtV, HasStdExtF]
4403
4404//===----------------------------------------------------------------------===//
4405// 17.5. Vector Compress Instruction
4406//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
  // vcompress takes an explicit mask as a regular source operand (rather than
  // as a v0 mask predicate), hence the dedicated _AnyMask multiclass.
  defm "" : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
  defm "" : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
} // Predicates = [HasStdExtV, HasStdExtF]
4414
4415// Include the non-intrinsic ISel patterns
4416include "RISCVInstrInfoVSDPatterns.td"
4417