//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file describes the RISC-V instructions from the standard 'V' Vector
/// extension, version 1.0.
///
//===----------------------------------------------------------------------===//
13
include "RISCVInstrFormatsV.td"
15
//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//
19
// Asm operand class for the vtypei immediate of vsetvli/vsetivli.
// VTypeINum is the operand's bit width (10 for vsetivli, 11 for vsetvli).
class VTypeIAsmOperand<int VTypeINum> : AsmOperandClass {
  let Name = "VTypeI" # VTypeINum;
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
  let RenderMethod = "addVTypeIOperands";
}
26
// MC-layer operand for a vtypei immediate of the given bit width; printing,
// parsing and decoding are all delegated to the VTypeI helpers.
class VTypeIOp<int VTypeINum> : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand<VTypeINum>;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<"#VTypeINum#">";
}

def VTypeIOp10 : VTypeIOp<10>;
def VTypeIOp11 : VTypeIOp<11>;
35
// Asm operand class for the optional trailing ", v0.t" mask operand.
// It is optional in assembly; when omitted the default (unmasked) operand is
// produced by defaultMaskRegOp.
def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

// Mask register operand (constrained to v0 via the VMV0 register class).
def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}
52
// 5-bit signed immediate operand, range [-16, 15].
def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

// Immediate whose value minus 1 fits in 5 bits, i.e. range [-15, 16].
// Used by pseudo-instructions (e.g. vmsge) that encode imm-1.
def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
  [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
    return MCOp.isBareSymbolRef();
  }];
}

// Same range as simm5_plus1 but excluding zero.
def simm5_plus1_nonzero : ImmLeaf<XLenVT,
  [{return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);}]>;
84
//===----------------------------------------------------------------------===//
// Scheduling definitions.
//===----------------------------------------------------------------------===//
88
// Sched helper classes. Each builds the scheduling resource names from the
// template parameters (register-group count n / nf, EEW, ordered/unordered
// suffix o) using !cast on the Write*/Read* defs from RISCVScheduleV.td.

class VMVRSched<int n>: Sched <[!cast<SchedReadWrite>("WriteVMov" # n # "V"),
                                !cast<SchedReadWrite>("ReadVMov" # n # "V")]>;

class VLESched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDE" # n),
                                ReadVLDX, ReadVMask]>;

class VSESched<int n> : Sched <[!cast<SchedReadWrite>("WriteVSTE" # n),
                                !cast<SchedReadWrite>("ReadVSTE" # n # "V"),
                                ReadVSTX, ReadVMask]>;

class VLSSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDS" # n),
                                ReadVLDX, ReadVLDSX, ReadVMask]>;

class VSSSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVSTS" # n),
                                !cast<SchedReadWrite>("ReadVSTS" # n # "V"),
                                ReadVSTX, ReadVSTSX, ReadVMask]>;

class VLXSched<int n, string o> :
  Sched <[!cast<SchedReadWrite>("WriteVLD" # o # "X" # n),
          ReadVLDX, !cast<SchedReadWrite>("ReadVLD" # o # "XV"), ReadVMask]>;

class VSXSched<int n, string o> :
  Sched <[!cast<SchedReadWrite>("WriteVST" # o # "X" # n),
          !cast<SchedReadWrite>("ReadVST" # o # "X" # n),
          ReadVSTX, !cast<SchedReadWrite>("ReadVST" # o # "XV"), ReadVMask]>;

class VLFSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDFF" # n),
                                ReadVLDX, ReadVMask]>;

// Unit-Stride Segment Loads and Stores
class VLSEGSched<int nf, int eew> : Sched<[
  !cast<SchedReadWrite>("WriteVLSEG" #nf #"e" #eew), ReadVLDX, ReadVMask]>;
class VSSEGSched<int nf, int eew> : Sched<[
  !cast<SchedReadWrite>("WriteVSSEG" #nf #"e" #eew),
  !cast<SchedReadWrite>("ReadVSTE" #eew #"V"), ReadVSTX, ReadVMask]>;
class VLSEGFFSched<int nf, int eew> : Sched<[
  !cast<SchedReadWrite>("WriteVLSEGFF" #nf #"e" #eew), ReadVLDX, ReadVMask]>;
// Strided Segment Loads and Stores
class VLSSEGSched<int nf, int eew> : Sched<[
  !cast<SchedReadWrite>("WriteVLSSEG" #nf #"e" #eew), ReadVLDX, ReadVLDSX,
  ReadVMask]>;
class VSSSEGSched<int nf, int eew> : Sched<[
  !cast<SchedReadWrite>("WriteVSSSEG" #nf #"e" #eew),
  !cast<SchedReadWrite>("ReadVSTS" #eew #"V"), ReadVSTX, ReadVSTSX, ReadVMask]>;
// Indexed Segment Loads and Stores
class VLUXSEGSched<int nf, int eew> : Sched<[
  !cast<SchedReadWrite>("WriteVLUXSEG" #nf #"e" #eew), ReadVLDX, ReadVLDUXV,
  ReadVMask]>;
class VLOXSEGSched<int nf, int eew> : Sched<[
  !cast<SchedReadWrite>("WriteVLOXSEG" #nf #"e" #eew), ReadVLDX, ReadVLDOXV,
  ReadVMask]>;
class VSUXSEGSched<int nf, int eew> : Sched<[
  !cast<SchedReadWrite>("WriteVSUXSEG" #nf #"e" #eew),
  !cast<SchedReadWrite>("ReadVSTUX" #eew), ReadVSTX, ReadVSTUXV, ReadVMask]>;
class VSOXSEGSched<int nf, int eew> : Sched<[
  !cast<SchedReadWrite>("WriteVSOXSEG" #nf #"e" #eew),
  !cast<SchedReadWrite>("ReadVSTOX" #eew), ReadVSTX, ReadVSTOXV, ReadVMask]>;
146
//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//
150
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// unit-stride load vd, (rs1), vm
class VUnitStrideLoad<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

let vm = 1, RVVConstraint = NoConstraint in {
// unit-stride whole register load vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VRC:$vd), (ins GPR:$rs1),
                opcodestr, "$vd, (${rs1})"> {
  // Whole-register loads are VL/VTYPE-independent.
  let Uses = [];
}

// unit-stride mask load vd, (rs1)
class VUnitStrideLoadMask<string opcodestr>
    : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1), opcodestr, "$vd, (${rs1})">;
} // vm = 1, RVVConstraint = NoConstraint

// unit-stride fault-only-first load vd, (rs1), vm
class VUnitStrideLoadFF<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// strided load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// indexed load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;

// unit-stride segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// segment fault-only-first load vd, (rs1), vm
class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// strided segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// indexed segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0
221
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// unit-stride store vd, vs3, (rs1), vm
class VUnitStrideStore<RISCVWidth width, string opcodestr>
    : RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

let vm = 1 in {
// vs<nf>r.v vd, (rs1)
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
                0b000, (outs), (ins VRC:$vs3, GPR:$rs1),
                opcodestr, "$vs3, (${rs1})"> {
  // Whole-register stores are VL/VTYPE-independent.
  let Uses = [];
}

// unit-stride mask store vd, vs3, (rs1)
class VUnitStrideStoreMask<string opcodestr>
    : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1), opcodestr,
                "$vs3, (${rs1})">;
} // vm = 1

// strided store vd, vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// indexed store vd, vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;

// segment store vd, vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// segment store vd, vs3, (rs1), rs2, vm
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// segment store vd, vs3, (rs1), vs2, vm
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1
276
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $vs1$vm">;

// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $vs1, v0"> {
  let vm = 0;
}

// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $vs1, $vs2$vm">;

// op vd, vs2, vs1
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs1, vs2
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}

// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;

// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}

// op vd, vs2, imm, vm
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, vm (use vs1 as instruction encoding)
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
               (ins VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
374
//===----------------------------------------------------------------------===//
// Combination of instruction classes.
// Use these multiclasses to define instructions more easily.
//===----------------------------------------------------------------------===//
379
// Defines ordered and unordered indexed loads/stores (vluxei/vloxei/
// vsuxei/vsoxei) for every EEW in EEWList.
multiclass VIndexLoadStore<list<int> EEWList> {
  foreach n = EEWList in {
    defvar w = !cast<RISCVWidth>("LSWidth" # n);

    def VLUXEI # n # _V :
      VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # n # ".v">,
      VLXSched<n, "U">;
    def VLOXEI # n # _V :
      VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # n # ".v">,
      VLXSched<n, "O">;

    def VSUXEI # n # _V :
      VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # n # ".v">,
      VSXSched<n, "U">;
    def VSOXEI # n # _V :
      VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # n # ".v">,
      VSXSched<n, "O">;
  }
}
399
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>;
}
408
multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
}

multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  // Use the scalar-operand write resource (WriteVIALUX) for the .vx form,
  // consistent with VALU_IV_V_X_I/VALU_IV_V_X above.
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>;
}
422
multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIWALUV, ReadVIWALUV, ReadVIWALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIWALUX, ReadVIWALUV, ReadVIWALUX, ReadVMask]>;
}

multiclass VMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIMulAddV, ReadVIMulAddV, ReadVIMulAddV, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIMulAddX, ReadVIMulAddV, ReadVIMulAddX, ReadVMask]>;
}

multiclass VWMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddV, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>;
}

multiclass VWMAC_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>;
}
448
multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
}

multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVICALUI, ReadVIALUCV, ReadVMask]>;
}

multiclass VMRG_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVIMergeV, ReadVIMergeV, ReadVIMergeV, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVIMergeX, ReadVIMergeV, ReadVIMergeX, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVIMergeI, ReadVIMergeV, ReadVMask]>;
}

multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
}

multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>;
  def I : VALUVINoVm<funct6, opcodestr # ".vi", optype>,
          Sched<[WriteVICALUI, ReadVIALUCV]>;
}

multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>;
}
494
multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFALUV, ReadVFALUV, ReadVFALUV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>;
}

multiclass VALU_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>;
}

multiclass VWALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWALUV, ReadVFWALUV, ReadVFWALUV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWALUF, ReadVFWALUV, ReadVFWALUF, ReadVMask]>;
}

multiclass VMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFMulV, ReadVFMulV, ReadVFMulV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFMulF, ReadVFMulV, ReadVFMulF, ReadVMask]>;
}

multiclass VDIV_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFDivV, ReadVFDivV, ReadVFDivV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>;
}

multiclass VRDIV_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>;
}

multiclass VWMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWMulV, ReadVFWMulV, ReadVFWMulV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWMulF, ReadVFWMulV, ReadVFWMulF, ReadVMask]>;
}

multiclass VMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFMulAddV, ReadVFMulAddV, ReadVFMulAddV, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFMulAddF, ReadVFMulAddV, ReadVFMulAddF, ReadVMask]>;
}

multiclass VWMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddV, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWMulAddF, ReadVFWMulAddV, ReadVFWMulAddF, ReadVMask]>;
}

multiclass VSQR_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
}

multiclass VRCP_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
}
562}
563
multiclass VCMP_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFCmpV, ReadVFCmpV, ReadVFCmpV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
}

multiclass VCMP_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
}

multiclass VSGNJ_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFSgnjV, ReadVFSgnjV, ReadVFSgnjV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFSgnjF, ReadVFSgnjV, ReadVFSgnjF, ReadVMask]>;
}

multiclass VCLS_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
}

multiclass VCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtIToFV, ReadVFCvtIToFV, ReadVMask]>;
}

multiclass VCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtFToIV, ReadVFCvtFToIV, ReadVMask]>;
}

multiclass VWCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtIToFV, ReadVFWCvtIToFV, ReadVMask]>;
}

multiclass VWCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToIV, ReadVFWCvtFToIV, ReadVMask]>;
}

multiclass VWCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToFV, ReadVFWCvtFToFV, ReadVMask]>;
}

multiclass VNCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtIToFV, ReadVFNCvtIToFV, ReadVMask]>;
}

multiclass VNCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToIV, ReadVFNCvtFToIV, ReadVMask]>;
}

multiclass VNCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToFV, ReadVFNCvtFToFV, ReadVMask]>;
}
627
multiclass VRED_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">,
            Sched<[WriteVIRedV, ReadVIRedV, ReadVIRedV0, ReadVMask]>;
}

multiclass VWRED_IV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPIVV, opcodestr # ".vs">,
            Sched<[WriteVIWRedV, ReadVIWRedV, ReadVIWRedV0, ReadVMask]>;
}

multiclass VRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedV, ReadVFRedV, ReadVFRedV0, ReadVMask]>;
}

multiclass VREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedOV, ReadVFRedOV, ReadVFRedOV0, ReadVMask]>;
}

multiclass VWRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedV, ReadVFWRedV, ReadVFWRedV0, ReadVMask]>;
}

multiclass VWREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedOV, ReadVFWRedOV, ReadVFWRedOV0, ReadVMask]>;
}

multiclass VMALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">,
          Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
}

multiclass VMSFS_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMSFSV, ReadVMSFSV, ReadVMask]>;
}

multiclass VMIOT_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
}
672
673multiclass VSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
674  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
675           Sched<[WriteVShiftV, ReadVShiftV, ReadVShiftV, ReadVMask]>;
676  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
677           Sched<[WriteVShiftX, ReadVShiftV, ReadVShiftX, ReadVMask]>;
678  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
679           Sched<[WriteVShiftI, ReadVShiftV, ReadVMask]>;
680}
681
682multiclass VNSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
683  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
684           Sched<[WriteVNShiftV, ReadVNShiftV, ReadVNShiftV, ReadVMask]>;
685  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
686           Sched<[WriteVNShiftX, ReadVNShiftV, ReadVNShiftX, ReadVMask]>;
687  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
688           Sched<[WriteVNShiftI, ReadVNShiftV, ReadVMask]>;
689}
690
691multiclass VCMP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
692  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
693           Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
694  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
695           Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
696  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
697           Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>;
698}
699
// Integer compares with only scalar and immediate forms (vmsgtu/vmsgt: the
// .vv form is provided via an InstAlias that swaps operands of vmslt[u]).
// NOTE(review): the X form previously scheduled as WriteVICmpV; changed to
// WriteVICmpX to match the .vx forms in VCMP_IV_V_X_I and VCMP_IV_V_X above.
multiclass VCMP_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>;
}
706
// Integer compares with only vector and scalar forms (no .vi encoding,
// e.g. vmsltu/vmslt).
multiclass VCMP_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
}
713
// Single-width integer multiply: .vv and .vx forms under the OPMVV/OPMVX
// funct3 encodings.
multiclass VMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIMulV, ReadVIMulV, ReadVIMulV, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIMulX, ReadVIMulV, ReadVIMulX, ReadVMask]>;
}

// Widening integer multiply (2*SEW result).
multiclass VWMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIWMulV, ReadVIWMulV, ReadVIWMulV, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIWMulX, ReadVIWMulV, ReadVIWMulX, ReadVMask]>;
}

// Integer divide/remainder: .vv and .vx forms.
multiclass VDIV_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIDivV, ReadVIDivV, ReadVIDivV, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIDivX, ReadVIDivV, ReadVIDivX, ReadVMask]>;
}
734
// Saturating add/subtract with all three operand forms (vsaddu/vsadd).
multiclass VSALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVSALUI, ReadVSALUV, ReadVMask]>;
}

// Saturating subtract: no immediate form (vssubu/vssub).
multiclass VSALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>;
}

// Averaging add/subtract (vaaddu/vaadd/vasubu/vasub): OPMVV/OPMVX encodings.
multiclass VAALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVAALUV, ReadVAALUV, ReadVAALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVAALUX, ReadVAALUV, ReadVAALUX, ReadVMask]>;
}
757
// Fractional multiply with rounding and saturation (vsmul): .vv/.vx only.
multiclass VSMUL_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSMulV, ReadVSMulV, ReadVSMulV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSMulX, ReadVSMulV, ReadVSMulX, ReadVMask]>;
}

// Scaling shift (vssrl/vssra): all three operand forms.
multiclass VSSHF_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSShiftV, ReadVSShiftV, ReadVSShiftV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSShiftX, ReadVSShiftV, ReadVSShiftX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVSShiftI, ReadVSShiftV, ReadVMask]>;
}

// Narrowing fixed-point clip (vnclipu/vnclip): all three operand forms.
multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVNClipV, ReadVNClipV, ReadVNClipV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVNClipX, ReadVNClipV, ReadVNClipX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVNClipI, ReadVNClipV, ReadVMask]>;
}
782
// Slide up/down by scalar or immediate offset (vslideup/vslidedown).
multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVISlideX, ReadVISlideV, ReadVISlideX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVISlideI, ReadVISlideV, ReadVMask]>;
}

// Slide by one element with scalar fill (vslide1up/vslide1down).
multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVISlide1X, ReadVISlideV, ReadVISlideX, ReadVMask]>;
}

// Slide by one element with FP scalar fill (vfslide1up/vfslide1down).
multiclass VSLD1_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFSlide1F, ReadVFSlideV, ReadVFSlideF, ReadVMask]>;
}
799
// Register gather (vrgather): all three operand forms.
multiclass VGTR_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVGatherX, ReadVGatherV, ReadVGatherX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVGatherI, ReadVGatherV, ReadVMask]>;
}

// Compress (vcompress.vm): never masked, so uses the NoVm form.
multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M  : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">,
           Sched<[WriteVCompressV, ReadVCompressV, ReadVCompressV]>;
}
813
// Whole-register loads for EEW 8/16/32. nf is the encoded field (number of
// registers minus one); the sched write is looked up as WriteVLD<nf+1>R<eew>.
multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> {
  foreach l = [8, 16, 32] in {
    defvar w = !cast<RISCVWidth>("LSWidth" # l);
    defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R" # l);

    def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>,
                     Sched<[s, ReadVLDX]>;
  }
}
// EEW 64 whole-register loads, split out so they can be guarded by the
// HasVInstructionsI64 predicate at the instantiation site.
multiclass VWholeLoadEEW64<bits<3> nf, string opcodestr, RegisterClass VRC, SchedReadWrite schedrw> {
  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>,
              Sched<[schedrw, ReadVLDX]>;
}
827
828//===----------------------------------------------------------------------===//
829// Instructions
830//===----------------------------------------------------------------------===//
831
let Predicates = [HasVInstructions] in {
// Configuration-setting instructions. These write VL/VTYPE, hence the
// hasSideEffects = 1.
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp11:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">;

def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp10:$vtypei),
                             "vsetivli", "$rd, $uimm, $vtypei">;

def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
// Loads/stores for EEW 8/16/32. The 64-bit variants live below under
// HasVInstructionsI64.
foreach eew = [8, 16, 32] in {
  defvar w = !cast<RISCVWidth>("LSWidth" # eew);

  // Vector Unit-Stride Instructions
  def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESched<eew>;
  def VSE#eew#_V  : VUnitStrideStore<w,  "vse"#eew#".v">, VSESched<eew>;

  // Vector Unit-Stride Fault-only-First Loads
  def VLE#eew#FF_V : VUnitStrideLoadFF<w,  "vle"#eew#"ff.v">, VLFSched<eew>;

  // Vector Strided Instructions
  def VLSE#eew#_V  : VStridedLoad<w,  "vlse"#eew#".v">, VLSSched<eew>;
  def VSSE#eew#_V  : VStridedStore<w,  "vsse"#eew#".v">, VSSSched<eew>;
}

defm "" : VIndexLoadStore<[8, 16, 32]>;
} // Predicates = [HasVInstructions]
860
let Predicates = [HasVInstructions] in {
// Mask load/store (always EEW=8, EMUL=1).
def VLM_V : VUnitStrideLoadMask<"vlm.v">,
             Sched<[WriteVLDM, ReadVLDX]>;
def VSM_V : VUnitStrideStoreMask<"vsm.v">,
             Sched<[WriteVSTM, ReadVSTM, ReadVSTX]>;
// Pre-1.0 spellings of the mask load/store, kept as non-printing aliases.
def : InstAlias<"vle1.v $vd, (${rs1})",
                (VLM_V VR:$vd, GPR:$rs1), 0>;
def : InstAlias<"vse1.v $vs3, (${rs1})",
                (VSM_V VR:$vs3, GPR:$rs1), 0>;

// Whole-register loads; nf field is register-count minus one.
defm VL1R : VWholeLoadN<0, "vl1r", VR>;
defm VL2R : VWholeLoadN<1, "vl2r", VRM2>;
defm VL4R : VWholeLoadN<3, "vl4r", VRM4>;
defm VL8R : VWholeLoadN<7, "vl8r", VRM8>;

// Whole-register stores (no EEW variants exist for stores).
def VS1R_V : VWholeStore<0, "vs1r.v", VR>,
             Sched<[WriteVST1R, ReadVST1R, ReadVSTX]>;
def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>,
             Sched<[WriteVST2R, ReadVST2R, ReadVSTX]>;
def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>,
             Sched<[WriteVST4R, ReadVST4R, ReadVSTX]>;
def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>,
             Sched<[WriteVST8R, ReadVST8R, ReadVSTX]>;

// vlNr.v without an EEW suffix is an alias for the EEW=8 form.
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
} // Predicates = [HasVInstructions]
890
// EEW=64 loads/stores, guarded so they are only available when the
// implementation supports 64-bit vector elements.
let Predicates = [HasVInstructionsI64] in {
// Vector Unit-Stride Instructions
def VLE64_V : VUnitStrideLoad<LSWidth64, "vle64.v">,
              VLESched<64>;

def VLE64FF_V : VUnitStrideLoadFF<LSWidth64, "vle64ff.v">,
                VLFSched<64>;

def VSE64_V : VUnitStrideStore<LSWidth64, "vse64.v">,
              VSESched<64>;
// Vector Strided Instructions
// NOTE(review): VLSE64_V previously used VLSSched<32>; corrected to the
// 64-bit sched class to match VLE64/VSE64/VSSE64 in this region.
def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">,
               VLSSched<64>;

def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">,
               VSSSched<64>;

// EEW=64 whole-register loads; extends the VL*R families defined above.
defm VL1R: VWholeLoadEEW64<0, "vl1r", VR, WriteVLD1R64>;
defm VL2R: VWholeLoadEEW64<1, "vl2r", VRM2, WriteVLD2R64>;
defm VL4R: VWholeLoadEEW64<3, "vl4r", VRM4, WriteVLD4R64>;
defm VL8R: VWholeLoadEEW64<7, "vl8r", VRM8, WriteVLD8R64>;
} // Predicates = [HasVInstructionsI64]
// EEW=64 indexed loads/stores additionally require RV64.
let Predicates = [IsRV64, HasVInstructionsI64] in {
  // Vector Indexed Instructions
  defm "" : VIndexLoadStore<[64]>;
} // [IsRV64, HasVInstructionsI64]
917
let Predicates = [HasVInstructions] in {
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

// vneg.v is sugar for vrsub.vx with x0 (0 - vs).
def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vneg.v $vd, $vs", (VRSUB_VX VR:$vd, VR:$vs, X0, zero_reg)>;

// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// Widening convert-by-add-zero aliases (vwcvt[u].x.x.v == vwadd[u].vx x0).
def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvt.x.x.v $vd, $vs",
                (VWADD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs",
                (VWADDU_VX VR:$vd, VR:$vs, X0, zero_reg)>;
959
// Vector Integer Extension
// vfN extends from SEW/N to SEW; the vs1 field selects the variant.
defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;

// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
// vmadc/vmsbc write a mask result, so the destination may not overlap the
// sources; both the masked (m) and unmasked (NoVm) encodings are defined.
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
979
// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

// vnot.v is sugar for vxor.vi with -1.
def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
def : InstAlias<"vnot.v $vd, $vs",
                (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>;

// Vector Single-Width Bit Shift Instructions
// Shift immediates are unsigned (uimm5), overriding the simm5 default.
defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101, uimm5>;
defm VSRL_V : VSHT_IV_V_X_I<"vsrl", 0b101000, uimm5>;
defm VSRA_V : VSHT_IV_V_X_I<"vsra", 0b101001, uimm5>;

// Vector Narrowing Integer Right Shift Instructions
// Refer to 11.3. Narrowing Vector Arithmetic Instructions
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VNSHT_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">;
defm VNSRA_W : VNSHT_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"

// vncvt.x.x.w is sugar for vnsrl.wx with x0 (shift by zero, i.e. truncate).
def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vncvt.x.x.w $vd, $vs",
                (VNSRL_WX VR:$vd, VR:$vs, X0, zero_reg)>;
1009
// Vector Integer Comparison Instructions
// Compares write a mask, so the usual destination-overlap constraint is
// lifted (NoConstraint).
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VCMP_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VCMP_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VCMP_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VCMP_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VCMP_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VCMP_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VCMP_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VCMP_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint

// The .vv greater-than/greater-equal forms have no encoding; they are
// realized by swapping the operands of the less-than/less-equal compares.
def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
1030
// Assembler-only pseudos expanded by the assembler/AsmParser, not codegen.
let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons we need to special case 0 immediate to maintain
// the always true/false semantics we would invert if we just decremented the
// immediate like we do for signed. To match the GNU assembler we will use
// vmseq/vmsne.vv with the same register for both operands which we can't do
// from an InstAlias.
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle signed with pseudos as well for more consistency in the
// implementation.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
}

// vmsge[u].vx has no hardware encoding; the assembler expands these pseudos
// into vmslt[u].vx followed by mask inversion. The _M forms are the masked
// variants; the _M_T forms additionally take a scratch register for the
// temporary mask.
let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
}
1075
// Vector Integer Min/Max Instructions
defm VMINU_V : VCMP_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VCMP_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VCMP_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VCMP_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VMUL_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VMUL_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VMUL_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VMUL_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VDIV_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VDIV_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VDIV_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VDIV_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
// Widening ops: destination is 2*SEW, so it must not overlap the narrower
// sources (WidenV constraint + earlyclobber).
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VWMUL_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VWMUL_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VWMUL_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VMAC_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VMAC_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VMAC_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VMAC_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VWMAC_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VWMAC_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VWMAC_MV_V_X<"vwmaccsu", 0b111111>;
defm VWMACCUS_V : VWMAC_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
1114
// Vector Integer Merge Instructions
defm VMERGE_V : VMRG_IV_V_X_I<"vmerge", 0b010111>;

// Vector Integer Move Instructions
// Moves reuse the vmerge encoding with vm=1 (unmasked) and vs2=0.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint  in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">,
              Sched<[WriteVIMovV, ReadVIMovV]>;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">,
              Sched<[WriteVIMovX, ReadVIMovX]>;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                       (ins simm5:$imm), "vmv.v.i", "$vd, $imm">,
              Sched<[WriteVIMovI]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1, RVVConstraint = NoConstraint
1134
// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VSALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VSALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VSALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VSALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VAALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VAALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VAALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VAALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VSMUL_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VSSHF_IV_V_X_I<"vssrl", 0b101010, uimm5>;
defm VSSRA_V : VSSHF_IV_V_X_I<"vssra", 0b101011, uimm5>;

// Vector Narrowing Fixed-Point Clip Instructions
// Narrowing: destination (SEW) must not overlap the wide (2*SEW) source.
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VNCLP_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">;
defm VNCLIP_W : VNCLP_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasVInstructions]
1160
let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Add/Subtract Instructions
// These read the dynamic rounding mode (FRM) and can raise FP exceptions.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;
}

// Vector Widening Floating-Point Add/Subtract Instructions
let Constraints = "@earlyclobber $vd",
    Uses = [FRM],
    mayRaiseFPException = true in {
let RVVConstraint = WidenV in {
defm VFWADD_V : VWALU_FV_V_F<"vfwadd", 0b110000>;
defm VFWSUB_V : VWALU_FV_V_F<"vfwsub", 0b110010>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VFWADD_W : VWALU_FV_V_F<"vfwadd", 0b110100, "w">;
defm VFWSUB_W : VWALU_FV_V_F<"vfwsub", 0b110110, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd", Uses = [FRM], mayRaiseFPException = true

// Vector Single-Width Floating-Point Multiply/Divide Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFMUL_V : VMUL_FV_V_F<"vfmul", 0b100100>;
defm VFDIV_V : VDIV_FV_V_F<"vfdiv", 0b100000>;
defm VFRDIV_V : VRDIV_FV_F<"vfrdiv", 0b100001>;
}

// Vector Widening Floating-Point Multiply
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
    Uses = [FRM], mayRaiseFPException = true in {
defm VFWMUL_V : VWMUL_FV_V_F<"vfwmul", 0b111000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true
1199
// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFMACC_V : VMAC_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VMAC_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VMAC_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VMAC_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VMAC_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VMAC_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VMAC_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VMAC_FV_V_F<"vfnmsub", 0b101011>;
}

// Vector Widening Floating-Point Fused Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
    Uses = [FRM], mayRaiseFPException = true in {
defm VFWMACC_V : VWMAC_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VWMAC_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VWMAC_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VWMAC_FV_V_F<"vfwnmsac", 0b111111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true

// Vector Floating-Point Square-Root Instruction
// vfsqrt and vfrec7 round, so they read FRM; vfrsqrt7 below does not.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFSQRT_V : VSQR_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
defm VFREC7_V : VRCP_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;
}

let mayRaiseFPException = true in
defm VFRSQRT7_V : VRCP_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;

// Vector Floating-Point MIN/MAX Instructions
defm VFMIN_V : VCMP_FV_V_F<"vfmin", 0b000100>;
defm VFMAX_V : VCMP_FV_V_F<"vfmax", 0b000110>;
}

// Vector Floating-Point Sign-Injection Instructions
defm VFSGNJ_V : VSGNJ_FV_V_F<"vfsgnj", 0b001000>;
defm VFSGNJN_V : VSGNJ_FV_V_F<"vfsgnjn", 0b001001>;
defm VFSGNJX_V : VSGNJ_FV_V_F<"vfsgnjx", 0b001010>;

// vfneg/vfabs are sign-injection with both sources equal.
def : InstAlias<"vfneg.v $vd, $vs$vm",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfneg.v $vd, $vs",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
def : InstAlias<"vfabs.v $vd, $vs$vm",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfabs.v $vd, $vs",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;

// Vector Floating-Point Compare Instructions
// FP compares write a mask result (NoConstraint) and may raise exceptions.
let RVVConstraint = NoConstraint, mayRaiseFPException = true in {
defm VMFEQ_V : VCMP_FV_V_F<"vmfeq", 0b011000>;
defm VMFNE_V : VCMP_FV_V_F<"vmfne", 0b011100>;
defm VMFLT_V : VCMP_FV_V_F<"vmflt", 0b011011>;
defm VMFLE_V : VCMP_FV_V_F<"vmfle", 0b011001>;
defm VMFGT_V : VCMP_FV_F<"vmfgt", 0b011101>;
defm VMFGE_V : VCMP_FV_F<"vmfge", 0b011111>;
} // RVVConstraint = NoConstraint, mayRaiseFPException = true
1259
// vmfgt/vmfge .vv have no encoding; realized by operand-swapped vmflt/vmfle.
def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
                (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
                (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

// Vector Floating-Point Classify Instruction
defm VFCLASS_V : VCLS_FV_VS2<"vfclass.v", 0b010011, 0b10000>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

// Vector Floating-Point Merge Instruction
// Always masked by v0 (vm=0); the mask operand is the explicit VMV0:$v0.
let vm = 0 in
def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                           (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0">,
                  Sched<[WriteVFMergeV, ReadVFMergeV, ReadVFMergeF, ReadVMask]>;

// Vector Floating-Point Move Instruction
// Reuses the vfmerge encoding with vm=1 (unmasked) and vs2=0.
let RVVConstraint = NoConstraint in
let vm = 1, vs2 = 0 in
def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                       (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1">,
               Sched<[WriteVFMovV, ReadVFMovF]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Single-Width Floating-Point/Integer Type-Convert Instructions
defm VFCVT_XU_F_V : VCVTI_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
defm VFCVT_X_F_V : VCVTI_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
defm VFCVT_RTZ_XU_F_V : VCVTI_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
defm VFCVT_RTZ_X_F_V : VCVTI_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
defm VFCVT_F_XU_V : VCVTF_IV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
defm VFCVT_F_X_V : VCVTF_IV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;

// Widening Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in {
defm VFWCVT_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
defm VFWCVT_X_F_V : VWCVTI_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
defm VFWCVT_RTZ_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
defm VFWCVT_RTZ_X_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
defm VFWCVT_F_XU_V : VWCVTF_IV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
defm VFWCVT_F_X_V : VWCVTF_IV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
defm VFWCVT_F_F_V : VWCVTF_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt

// Narrowing Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd" in {
defm VFNCVT_XU_F_W : VNCVTI_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
defm VFNCVT_X_F_W : VNCVTI_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
defm VFNCVT_RTZ_XU_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
defm VFNCVT_RTZ_X_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
defm VFNCVT_F_XU_W : VNCVTF_IV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasVInstructionsAnyF]
1317
let Predicates = [HasVInstructions] in {

// Vector Single-Width Integer Reduction Instructions
// Reductions produce a scalar in element 0 of vd (NoConstraint).
let RVVConstraint = NoConstraint in {
defm VREDSUM : VRED_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VRED_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX : VRED_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VRED_MV_V<"vredminu", 0b000100>;
defm VREDMIN : VRED_MV_V<"vredmin", 0b000101>;
defm VREDAND : VRED_MV_V<"vredand", 0b000001>;
defm VREDOR : VRED_MV_V<"vredor", 0b000010>;
defm VREDXOR : VRED_MV_V<"vredxor", 0b000011>;
} // RVVConstraint = NoConstraint

// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VWREDSUMU : VWRED_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VWRED_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

} // Predicates = [HasVInstructions]
1343
let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Reduction Instructions
let RVVConstraint = NoConstraint in {
// Summation reductions round, so they read FRM; min/max do not.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>;
defm VFREDUSUM : VRED_FV_V<"vfredusum", 0b000001>;
}
let mayRaiseFPException = true in {
defm VFREDMAX : VRED_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VRED_FV_V<"vfredmin", 0b000101>;
}
} // RVVConstraint = NoConstraint

// Legacy (pre-1.0) spelling of the unordered sum reduction.
def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm",
                (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;

// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFWREDOSUM : VWREDO_FV_V<"vfwredosum", 0b110011>;
defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>;
}
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// Legacy (pre-1.0) spelling of the widening unordered sum reduction.
def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm",
                (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
} // Predicates = [HasVInstructionsAnyF]
1375
let Predicates = [HasVInstructions] in {
// Vector Mask-Register Logical Instructions
// Mask-register ops operate on whole mask registers, so the standard
// destination-overlap constraint is lifted (RVVConstraint = NoConstraint).
let RVVConstraint = NoConstraint in {
defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">;
defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">;
defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">;
} // RVVConstraint = NoConstraint

// Pseudo-op aliases built on the mask logical instructions:
//   vmmv.m  vd, vs -> vmand.mm  vd, vs, vs   (copy mask)
//   vmclr.m vd     -> vmxor.mm  vd, vd, vd   (clear mask)
//   vmset.m vd     -> vmxnor.mm vd, vd, vd   (set mask)
//   vmnot.m vd, vs -> vmnand.mm vd, vs, vs   (invert mask)
def : InstAlias<"vmmv.m $vd, $vs",
                (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;

// Accept the legacy spellings of vmandn.mm / vmorn.mm (priority 0: parsed
// but never used when printing).
def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1",
                (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
def : InstAlias<"vmornot.mm $vd, $vs2, $vs1",
                (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint  in {

// Vector mask population count vcpop
def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2, VMaskOp:$vm),
                      "vcpop.m", "$vd, $vs2$vm">,
              Sched<[WriteVMPopV, ReadVMPopV, ReadVMask]>;

// vfirst find-first-set mask bit
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                       (ins VR:$vs2, VMaskOp:$vm),
                       "vfirst.m", "$vd, $vs2$vm">,
              Sched<[WriteVMFFSV, ReadVMFFSV, ReadVMask]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, RVVConstraint = NoConstraint

// Accept the legacy spelling "vpopc.m" for vcpop.m (priority 0).
def : InstAlias<"vpopc.m $vd, $vs2$vm",
                (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>;

let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {

// vmsbf.m set-before-first mask bit
defm VMSBF_M : VMSFS_MV_V<"vmsbf.m", 0b010100, 0b00001>;
// vmsif.m set-including-first mask bit
defm VMSIF_M : VMSFS_MV_V<"vmsif.m", 0b010100, 0b00011>;
// vmsof.m set-only-first mask bit
defm VMSOF_M : VMSFS_MV_V<"vmsof.m", 0b010100, 0b00010>;
// Vector Iota Instruction
defm VIOTA_M : VMIOT_MV_V<"viota.m", 0b010100, 0b10000>;

} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota

// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

// vid.v has no vs2 source; that field is hard-wired to 0 in the encoding.
let vs2 = 0 in
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
                    (ins VMaskOp:$vm), "vid.v", "$vd$vm">,
            Sched<[WriteVMIdxV, ReadVMask]>;

// Integer Scalar Move Instructions
// Both are unmasked (vm = 1). vmv.s.x reads and writes vd, modeled by
// tying the output $vd_wb to the input $vd.
let vm = 1, RVVConstraint = NoConstraint in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">,
              Sched<[WriteVIMovVX, ReadVIMovVX]>;
let Constraints = "$vd = $vd_wb" in
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
                      (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">,
              Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>;
} // vm = 1, RVVConstraint = NoConstraint

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

} // Predicates = [HasVInstructions]
1458
let Predicates = [HasVInstructionsAnyF] in {

// Both instructions are unmasked (vm = 1) and exempt from the overlap
// constraint. The scalar side uses the FPR32 register class here.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
    RVVConstraint = NoConstraint  in {
// Floating-Point Scalar Move Instructions
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                      (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">,
               Sched<[WriteVFMovVF, ReadVFMovVF]>;
// vfmv.s.f reads and writes vd; the output $vd_wb is tied to the input $vd.
let Constraints = "$vd = $vd_wb" in
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
                       (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">,
               Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1, RVVConstraint = NoConstraint

} // Predicates = [HasVInstructionsAnyF]
1475
let Predicates = [HasVInstructions] in {
// Vector Slide Instructions
// The slide-up forms forbid vd from overlapping the source (earlyclobber +
// SlideUp constraint); the slide-down forms carry no such restriction.
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110, uimm5>;
defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111, uimm5>;
defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasVInstructions]
1485
let Predicates = [HasVInstructionsAnyF] in {
// Floating-point variants of vslide1up/vslide1down; as with the integer
// forms, only the slide-up direction needs the overlap constraint.
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VSLD1_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VSLD1_FV_F<"vfslide1down", 0b001111>;
} // Predicates = [HasVInstructionsAnyF]
1492
let Predicates = [HasVInstructions] in {
// Vector Register Gather Instruction
// Gather destinations may not overlap their sources (earlyclobber +
// Vrgather). vrgatherei16.vv shares funct6 0b001110 but uses the OPIVV
// encoding with 16-bit indices.
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100, uimm5>;
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">,
                      Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV]>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather

// Vector Compress Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VCPR_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress

// Whole Vector Register Move Instructions (vmv1r.v .. vmv8r.v).
// The register count is encoded as n-1 (!add(n, -1)); operands come from
// the matching register class (VR, VRM2, VRM4, VRM8).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
// A future extension may relax the vector register alignment restrictions.
foreach n = [1, 2, 4, 8] in {
  defvar vrc = !cast<VReg>(!if(!eq(n, 1), "VR", "VRM"#n));
  def VMV#n#R_V  : RVInstV<0b100111, !add(n, -1), OPIVI, (outs vrc:$vd),
                           (ins vrc:$vs2), "vmv" # n # "r.v", "$vd, $vs2">,
                   VMVRSched<n> {
    // Clear any implicit uses inherited from the base class; these moves
    // are unmasked (vm = 1).
    let Uses = [];
    let vm = 1;
  }
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, RVVConstraint = NoConstraint
} // Predicates = [HasVInstructions]
1520
// Zvlsseg segment load/store instructions for EEW 8/16/32 and 2-8 fields.
// The field count nf is encoded as nf-1 (!add(nf, -1)); instruction names
// follow the spec mnemonics, e.g. vlseg<nf>e<eew>.v.
let Predicates = [HasVInstructions] in {
  foreach nf=2-8 in {
    foreach eew = [8, 16, 32] in {
      defvar w = !cast<RISCVWidth>("LSWidth"#eew);

      // Vector Unit-Stride Instructions (plus fault-only-first loads)
      def VLSEG#nf#E#eew#_V :
        VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">,
        VLSEGSched<nf, eew>;
      def VLSEG#nf#E#eew#FF_V :
        VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">,
        VLSEGFFSched<nf, eew>;
      def VSSEG#nf#E#eew#_V :
        VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">,
        VSSEGSched<nf, eew>;
      // Vector Strided Instructions
      def VLSSEG#nf#E#eew#_V :
        VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">,
        VLSSEGSched<nf, eew>;
      def VSSSEG#nf#E#eew#_V :
        VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">,
        VSSSEGSched<nf, eew>;

      // Vector Indexed Instructions (unordered "ux" and ordered "ox" forms)
      def VLUXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
                            "vluxseg"#nf#"ei"#eew#".v">, VLUXSEGSched<nf, eew>;
      def VLOXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
                            "vloxseg"#nf#"ei"#eew#".v">, VLOXSEGSched<nf, eew>;
      def VSUXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
                             "vsuxseg"#nf#"ei"#eew#".v">, VSUXSEGSched<nf, eew>;
      def VSOXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
                             "vsoxseg"#nf#"ei"#eew#".v">, VSOXSEGSched<nf, eew>;
    }
  }
} // Predicates = [HasVInstructions]
1559
// EEW=64 unit-stride and strided segment instructions, gated on the
// HasVInstructionsI64 predicate. Structure mirrors the 8/16/32 definitions
// above; nf is encoded as nf-1.
let Predicates = [HasVInstructionsI64] in {
  foreach nf=2-8 in {
    // Vector Unit-strided Segment Instructions
    def VLSEG#nf#E64_V :
      VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">,
      VLSEGSched<nf, 64>;
    def VLSEG#nf#E64FF_V :
      VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">,
      VLSEGFFSched<nf, 64>;
    def VSSEG#nf#E64_V :
      VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">,
      VSSEGSched<nf, 64>;

    // Vector Strided Segment Instructions
    def VLSSEG#nf#E64_V :
      VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">,
      VLSSEGSched<nf, 64>;
    def VSSSEG#nf#E64_V :
      VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">,
      VSSSEGSched<nf, 64>;
  }
} // Predicates = [HasVInstructionsI64]
// EEW=64 indexed segment instructions additionally require RV64
// (64-bit index elements are only available there).
let Predicates = [HasVInstructionsI64, IsRV64] in {
  foreach nf=2-8 in {
    // Vector Indexed Segment Instructions (unordered "ux" / ordered "ox")
    def VLUXSEG#nf#EI64_V :
      VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
                          "vluxseg"#nf#"ei64.v">, VLUXSEGSched<nf, 64>;
    def VLOXSEG#nf#EI64_V :
      VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
                          "vloxseg"#nf#"ei64.v">, VLOXSEGSched<nf, 64>;
    def VSUXSEG#nf#EI64_V :
      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
                           "vsuxseg"#nf#"ei64.v">, VSUXSEGSched<nf, 64>;
    def VSOXSEG#nf#EI64_V :
      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
                           "vsoxseg"#nf#"ei64.v">, VSOXSEGSched<nf, 64>;
  }
} // Predicates = [HasVInstructionsI64, IsRV64]
1599
1600include "RISCVInstrInfoVPseudos.td"
1601