1//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// This file describes the RISC-V instructions from the standard 'V' Vector
10/// extension, version 0.10.
11/// This version is still experimental as the 'V' extension hasn't been
12/// ratified yet.
13///
14//===----------------------------------------------------------------------===//
15
16include "RISCVInstrFormatsV.td"
17
18//===----------------------------------------------------------------------===//
19// Operand and SDNode transformation definitions.
20//===----------------------------------------------------------------------===//
21
// Assembler operand class for the vtype immediate of vsetvli/vsetivli;
// parsed from its symbolic form (e.g. "e32,m2") by parseVTypeI.
def VTypeIAsmOperand : AsmOperandClass {
  let Name = "VTypeI";
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
}

// XLen-typed operand carrying the encoded vtype value; printed back in
// symbolic form and decoded as an 11-bit unsigned immediate field.
def VTypeIOp : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<11>";
}
33
// Assembler operand class for the optional trailing mask operand ("v0.t").
// The operand is optional in assembly; when absent, defaultMaskRegOp
// supplies the unmasked encoding. Only v0 is a legal mask register
// (checked by isV0Reg).
def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

// Register operand over the VMV0 class (mask register); custom print/
// encode/decode methods map it to the single vm bit in the encoding.
def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}
50
// 5-bit signed immediate operand used by the OPIVI instruction forms.
// Also usable in ISel patterns via the ImmLeaf predicate.
def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    // Non-constant operands are accepted only as bare symbol references.
    return MCOp.isBareSymbolRef();
  }];
}
62
// Assembler operand class for immediates in [-15, 16], i.e. a simm5 value
// plus one (used by pseudo-comparisons like vmsge that are encoded as the
// adjacent comparison with imm-1).
def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

// Immediate operand accepting [-15, 16]: the simm5 range shifted up by
// one (excludes -16, includes 16).
def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
  [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
    return MCOp.isBareSymbolRef();
  }];
}
79
80//===----------------------------------------------------------------------===//
81// Scheduling definitions.
82//===----------------------------------------------------------------------===//
83
// Sched helper classes that select width-specific SchedReadWrite records
// by name via !cast (e.g. VLESched<8> -> WriteVLDE8). The referenced
// records are declared in the RISC-V vector scheduling model.

// Whole-register move of n vector registers.
class VMVRSched<int n>: Sched <[!cast<SchedReadWrite>("WriteVMov" # n # "V"),
                                !cast<SchedReadWrite>("ReadVMov" # n # "V")]>;

// Unit-stride load, element width n.
class VLESched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDE" # n),
                                ReadVLDX, ReadVMask]>;

// Unit-stride store, element width n.
class VSESched<int n> : Sched <[!cast<SchedReadWrite>("WriteVSTE" # n),
                                !cast<SchedReadWrite>("ReadVSTE" # n # "V"),
                                ReadVSTX, ReadVMask]>;

// Strided load, element width n.
class VLSSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDS" # n),
                                ReadVLDX, ReadVLDSX, ReadVMask]>;

// Strided store, element width n.
class VSSSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVSTS" # n),
                                !cast<SchedReadWrite>("ReadVSTS" # n # "V"),
                                ReadVSTX, ReadVSTSX, ReadVMask]>;

// Indexed load; o selects the variant name infix (presumably "U"/"O" for
// unordered/ordered -- confirm against the scheduling model defs).
class VLXSched<int n, string o> :
  Sched <[!cast<SchedReadWrite>("WriteVLD" # o # "X" # n),
          ReadVLDX, !cast<SchedReadWrite>("ReadVLD" # o # "XV"), ReadVMask]>;

// Indexed store; o selects the variant name infix as above.
class VSXSched<int n, string o> :
  Sched <[!cast<SchedReadWrite>("WriteVST" # o # "X" # n),
          !cast<SchedReadWrite>("ReadVST" # o # "X" # n),
          ReadVSTX, !cast<SchedReadWrite>("ReadVST" # o # "XV"), ReadVMask]>;

// Unit-stride fault-only-first load, element width n.
class VLFSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDFF" # n),
                                ReadVLDX, ReadVMask]>;
112
113//===----------------------------------------------------------------------===//
114// Instruction class templates
115//===----------------------------------------------------------------------===//
116
// Instruction class templates for all vector load forms. Every class in
// this region is a pure load: no side effects, mayLoad, no store.
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// unit-stride load vd, (rs1), vm
class VUnitStrideLoad<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// Always-unmasked forms (vm = 1) with no register-overlap constraint.
let vm = 1, RVVConstraint = NoConstraint in {
// unit-stride whole register load vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VRC:$vd), (ins GPR:$rs1),
                opcodestr, "$vd, (${rs1})"> {
  // NOTE(review): Uses cleared — whole-register loads appear not to
  // depend on the implicit VL/VTYPE uses inherited from the base class;
  // confirm against RVInstVLU.
  let Uses = [];
}

// unit-stride mask load vd, (rs1)
class VUnitStrideLoadMask<string opcodestr>
    : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1), opcodestr, "$vd, (${rs1})">;
} // vm = 1, RVVConstraint = NoConstraint

// unit-stride fault-only-first load vd, (rs1), vm
class VUnitStrideLoadFF<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// strided load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// indexed load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;

// Segment variants: identical operand shape to the scalar-segment forms
// above, but carry the nf (number of fields) encoding.

// unit-stride segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// segment fault-only-first load vd, (rs1), vm
class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// strided segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// indexed segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0
187
// Instruction class templates for all vector store forms. Every class in
// this region is a pure store: no side effects, no load, mayStore.
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// unit-stride store vd, vs3, (rs1), vm
class VUnitStrideStore<RISCVWidth width, string opcodestr>
    : RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// Always-unmasked forms.
let vm = 1 in {
// vs<nf>r.v vd, (rs1)
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
                0b000, (outs), (ins VRC:$vs3, GPR:$rs1),
                opcodestr, "$vs3, (${rs1})"> {
  // NOTE(review): Uses cleared — whole-register stores appear not to
  // depend on the implicit uses inherited from the base class; confirm.
  let Uses = [];
}

// unit-stride mask store vd, vs3, (rs1)
class VUnitStrideStoreMask<string opcodestr>
    : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1), opcodestr,
                "$vs3, (${rs1})">;
} // vm = 1

// strided store vd, vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// indexed store vd, vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;

// Segment variants carrying the nf (number of fields) encoding.

// segment store vd, vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// segment store vd, vs3, (rs1), rs2, vm
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// segment store vd, vs3, (rs1), vs2, vm
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1
242
// Instruction class templates for register-to-register vector ALU forms.
// Naming: VALU{m,r,}V{V,X,I,F} — 'm' = carry-in via v0 with vm=0,
// 'r' = source operands printed/encoded in reversed order (for
// multiply-add style ops), final letter = second-source kind
// (V=vector, X=GPR, I=immediate, F=FPR).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $vs1$vm">;

// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $vs1, v0"> {
  // vm=0 repurposes the mask bit: v0 is a data (carry) input here.
  let vm = 0;
}

// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $vs1, $vs2$vm">;

// op vd, vs2, vs1
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  // vm=1 encodes the always-unmasked form.
  let vm = 1;
}

// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs1, vs2
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}

// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;

// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}

// op vd, vs2, imm, vm
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, vm (use vs1 as instruction encoding)
// Unary form: the vs1 field is a fixed funct value selecting the op.
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
               (ins VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
340
// Vector AMO instruction classes: atomic read-modify-write, so both
// mayLoad and mayStore.
let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in {
// vamo vd, (rs1), vs2, vd, vm
// wd=1 form: the old memory value is written back into vd, which is
// therefore tied as both source and destination ($vd_wd = $vd).
class VAMOWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
    : RVInstVAMO<amoop, width.Value{2-0}, (outs VR:$vd_wd),
            (ins GPR:$rs1, VR:$vs2, VR:$vd, VMaskOp:$vm),
            opcodestr, "$vd_wd, (${rs1}), $vs2, $vd$vm"> {
    let Constraints = "$vd_wd = $vd";
    let wd = 1;
    bits<5> vd;
    // The source register number is encoded in bits 11-7.
    let Inst{11-7} = vd;
}

// vamo x0, (rs1), vs2, vs3, vm
// wd=0 form: the old memory value is discarded (destination is x0);
// vs3 supplies the operand and occupies the bits 11-7 field.
class VAMONoWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
    : RVInstVAMO<amoop, width.Value{2-0}, (outs),
            (ins GPR:$rs1, VR:$vs2, VR:$vs3, VMaskOp:$vm),
            opcodestr, "x0, (${rs1}), $vs2, $vs3$vm"> {
    bits<5> vs3;
    let Inst{11-7} = vs3;
}

} // hasSideEffects = 0, mayLoad = 1, mayStore = 1
363
364//===----------------------------------------------------------------------===//
365// Combination of instruction classes.
366// Use these multiclasses to define instructions more easily.
367//===----------------------------------------------------------------------===//
// Integer ALU op in all three source forms: .vv, .vx and .vi.
// vw selects the mnemonic infix ("v" or "w" for widening variants).
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>;
}

// Integer ALU op in vector-vector and vector-scalar forms only.
multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
}
383
// Integer ALU op that exists only in vector-scalar and vector-immediate
// forms (e.g. vrsub).
multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  // Fix: the .vx form previously listed WriteVIALUV; the scalar-operand
  // form uses the WriteVIALUX resource, matching VALU_IV_V_X_I/VALU_IV_V_X.
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>;
}
390
// Widening integer ALU op (OPMVV/OPMVX encodings) in .vv/.vx forms.
multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIWALUV, ReadVIWALUV, ReadVIWALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIWALUX, ReadVIWALUV, ReadVIWALUX, ReadVMask]>;
}

// Integer multiply-add; uses the reversed-operand classes (VALUrVV/VALUrVX)
// because multiply-add mnemonics print vs1/rs1 before vs2.
multiclass VMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIMulAddV, ReadVIMulAddV, ReadVIMulAddV, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIMulAddX, ReadVIMulAddV, ReadVIMulAddX, ReadVMask]>;
}

// Widening integer multiply-add in .vv/.vx forms.
multiclass VWMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddV, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>;
}

// Widening integer multiply-add, scalar form only (e.g. vwmaccus).
multiclass VWMAC_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>;
}

// Unary op encoded via a fixed vs1 funct value (integer extension ops).
multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
}
421
// Carry-in ops (vadc/vmadc style): .vvm/.vxm/.vim forms taking v0 as an
// explicit carry operand (vm=0 in the encoding).
multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVICALUI, ReadVIALUCV, ReadVMask]>;
}

// Merge ops (vmerge): same carry-style operand shape, merge scheduling
// resources.
multiclass VMRG_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVIMergeV, ReadVIMergeV, ReadVIMergeV, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVIMergeX, ReadVIMergeV, ReadVIMergeX, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVIMergeI, ReadVIMergeV, ReadVMask]>;
}

// Carry-in ops without an immediate form (e.g. vsbc).
multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
}

// Always-unmasked ops (vm=1 in the encoding, no mask operand).
multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>;
  def I : VALUVINoVm<funct6, opcodestr # ".vi", optype>,
          Sched<[WriteVICALUI, ReadVIALUCV]>;
}

// Always-unmasked ops without an immediate form.
multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>;
}
462
// Floating-point binary ops: .vv (vector-vector) and .vf (vector-scalar,
// FPR operand) forms, grouped by scheduling resource category.

// FP add/sub class.
multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFALUV, ReadVFALUV, ReadVFALUV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>;
}

// Scalar-only FP op (e.g. vfrsub).
multiclass VALU_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>;
}

// Widening FP add/sub.
multiclass VWALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWALUV, ReadVFWALUV, ReadVFWALUV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWALUF, ReadVFWALUV, ReadVFWALUF, ReadVMask]>;
}

// FP multiply.
multiclass VMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFMulV, ReadVFMulV, ReadVFMulV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFMulF, ReadVFMulV, ReadVFMulF, ReadVMask]>;
}

// FP divide.
multiclass VDIV_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFDivV, ReadVFDivV, ReadVFDivV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>;
}

// Reverse FP divide, scalar-only (vfrdiv).
multiclass VRDIV_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>;
}

// Widening FP multiply.
multiclass VWMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWMulV, ReadVFWMulV, ReadVFWMulV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWMulF, ReadVFWMulV, ReadVFWMulF, ReadVMask]>;
}

// FP multiply-add (reversed-operand classes for fma mnemonics).
multiclass VMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFMulAddV, ReadVFMulAddV, ReadVFMulAddV, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFMulAddF, ReadVFMulAddV, ReadVFMulAddF, ReadVMask]>;
}

// Widening FP multiply-add.
multiclass VWMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddV, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWMulAddF, ReadVFWMulAddV, ReadVFWMulAddF, ReadVMask]>;
}
521
// FP unary ops encoded through VALUVs2: the vs1 field carries a fixed
// funct value selecting the particular operation.

// Square root.
multiclass VSQR_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
}

// Reciprocal / reciprocal-sqrt estimate.
multiclass VRCP_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
}

// FP compare, .vv and .vf forms.
multiclass VCMP_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFCmpV, ReadVFCmpV, ReadVFCmpV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
}

// FP compare, scalar-only form.
multiclass VCMP_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
}

// FP sign-injection.
multiclass VSGNJ_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFSgnjV, ReadVFSgnjV, ReadVFSgnjV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFSgnjF, ReadVFSgnjV, ReadVFSgnjF, ReadVMask]>;
}

// FP classify.
multiclass VCLS_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
}

// Single-width int-to-float conversion.
multiclass VCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtIToFV, ReadVFCvtIToFV, ReadVMask]>;
}

// Single-width float-to-int conversion.
multiclass VCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtFToIV, ReadVFCvtFToIV, ReadVMask]>;
}

// Widening int-to-float conversion.
multiclass VWCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtIToFV, ReadVFWCvtIToFV, ReadVMask]>;
}

// Widening float-to-int conversion.
multiclass VWCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToIV, ReadVFWCvtFToIV, ReadVMask]>;
}

// Widening float-to-float conversion.
multiclass VWCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToFV, ReadVFWCvtFToFV, ReadVMask]>;
}

// Narrowing int-to-float conversion.
multiclass VNCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtIToFV, ReadVFNCvtIToFV, ReadVMask]>;
}

// Narrowing float-to-int conversion.
multiclass VNCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToIV, ReadVFNCvtFToIV, ReadVMask]>;
}

// Narrowing float-to-float conversion.
multiclass VNCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToFV, ReadVFNCvtFToFV, ReadVMask]>;
}
595
// Reduction ops: .vs forms reducing a vector into element 0 of vd using
// vs1[0] as the initial value (hence the separate ReadV*V0 resource for
// the scalar-source read).

// Integer reduction.
multiclass VRED_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">,
            Sched<[WriteVIRedV, ReadVIRedV, ReadVIRedV0, ReadVMask]>;
}

// Widening integer reduction.
multiclass VWRED_IV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPIVV, opcodestr # ".vs">,
            Sched<[WriteVIWRedV, ReadVIWRedV, ReadVIWRedV0, ReadVMask]>;
}

// Unordered FP reduction.
multiclass VRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedV, ReadVFRedV, ReadVFRedV0, ReadVMask]>;
}

// Ordered FP reduction.
multiclass VREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedOV, ReadVFRedOV, ReadVFRedOV0, ReadVMask]>;
}

// Unordered widening FP reduction.
multiclass VWRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedV, ReadVFWRedV, ReadVFWRedV0, ReadVMask]>;
}

// Ordered widening FP reduction.
multiclass VWREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedOV, ReadVFWRedOV, ReadVFWRedOV0, ReadVMask]>;
}

// Mask-register logical op (.mm): always unmasked.
multiclass VMALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">,
          Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
}

// Set-first / set-before / set-including-first mask ops (vs1-encoded).
multiclass VMSFS_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMSFSV, ReadVMSFSV, ReadVMask]>;
}

// Mask iota / id ops (vs1-encoded).
multiclass VMIOT_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
}
640
// Single-width shift: .vv/.vx/.vi forms.
multiclass VSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVShiftV, ReadVShiftV, ReadVShiftV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVShiftX, ReadVShiftV, ReadVShiftX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVShiftI, ReadVShiftV, ReadVMask]>;
}

// Narrowing shift: .wv/.wx/.wi forms (vw passed as "w" by callers).
multiclass VNSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVNShiftV, ReadVNShiftV, ReadVNShiftV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVNShiftX, ReadVNShiftV, ReadVNShiftX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVNShiftI, ReadVNShiftV, ReadVMask]>;
}

// Integer compare: .vv/.vx/.vi forms.
multiclass VCMP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>;
}
667
// Integer compare that exists only in scalar and immediate forms
// (e.g. vmsgt/vmsge family).
multiclass VCMP_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  // Fix: the .vx form previously listed WriteVICmpV; the scalar-operand
  // form uses the WriteVICmpX resource, matching VCMP_IV_V_X_I/VCMP_IV_V_X.
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>;
}
674
675multiclass VCMP_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
676  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
677           Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
678  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
679           Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
680}
681
682multiclass VMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
683  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
684           Sched<[WriteVIMulV, ReadVIMulV, ReadVIMulV, ReadVMask]>;
685  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
686           Sched<[WriteVIMulX, ReadVIMulV, ReadVIMulX, ReadVMask]>;
687}
688
689multiclass VWMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
690  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
691           Sched<[WriteVIWMulV, ReadVIWMulV, ReadVIWMulV, ReadVMask]>;
692  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
693           Sched<[WriteVIWMulX, ReadVIWMulV, ReadVIWMulX, ReadVMask]>;
694}
695
// Integer divide/remainder (.vv/.vx), scheduled on the VIDiv resources.
multiclass VDIV_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVIDivV, ReadVIDivV, ReadVIDivV, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVIDivX, ReadVIDivV, ReadVIDivX, ReadVMask]>;
}
702
// Saturating integer add/subtract with .vv, .vx and .vi forms; the immediate
// operand type defaults to simm5.
multiclass VSALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVSALUI, ReadVSALUV, ReadVMask]>;
}
711
// Saturating integer add/subtract without an immediate form (used by
// vssubu/vssub below).
multiclass VSALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>;
}
718
// Averaging add/subtract (.vv/.vx) on the OPMVV/OPMVX encodings.
multiclass VAALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVAALUV, ReadVAALUV, ReadVAALUV, ReadVMask]>;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVAALUX, ReadVAALUV, ReadVAALUX, ReadVMask]>;
}
725
// Fractional multiply with rounding and saturation (vsmul), .vv/.vx forms.
multiclass VSMUL_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSMulV, ReadVSMulV, ReadVSMulV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSMulX, ReadVSMulV, ReadVSMulX, ReadVMask]>;
}
732
// Scaling shift (vssrl/vssra) with .vv, .vx and .vi forms; callers pass
// uimm5 for optype since shift amounts are unsigned.
multiclass VSSHF_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVSShiftV, ReadVSShiftV, ReadVSShiftV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVSShiftX, ReadVSShiftV, ReadVSShiftX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVSShiftI, ReadVSShiftV, ReadVMask]>;
}
741
// Narrowing fixed-point clip (vnclipu/vnclip) with .wv, .wx and .wi forms
// (callers pass vw = "w" since the vs2 source is the wide operand).
multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVNClipV, ReadVNClipV, ReadVNClipV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVNClipX, ReadVNClipV, ReadVNClipX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVNClipI, ReadVNClipV, ReadVMask]>;
}
750
// Slide up/down by a scalar (.vx) or immediate (.vi) offset; no .vv form
// exists for slides.
multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVISlideX, ReadVISlideV, ReadVISlideX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVISlideI, ReadVISlideV, ReadVMask]>;
}
757
// Integer slide-by-one (vslide1up/vslide1down): scalar .vx form only.
multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVISlide1X, ReadVISlideV, ReadVISlideX, ReadVMask]>;
}
762
// Floating-point slide-by-one (vfslide1up/vfslide1down): FP scalar .vf form.
multiclass VSLD1_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFSlide1F, ReadVFSlideV, ReadVFSlideF, ReadVMask]>;
}
767
// Register gather (vrgather) with .vv, .vx and .vi index forms; callers pass
// uimm5 for optype since indices are unsigned.
multiclass VGTR_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
           Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV, ReadVMask]>;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
           Sched<[WriteVGatherX, ReadVGatherV, ReadVGatherX, ReadVMask]>;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
           Sched<[WriteVGatherI, ReadVGatherV, ReadVMask]>;
}
776
// Compress (vcompress.vm): the mask operand is encoded in vs1 rather than
// v0.t, so the unmasked instruction class (VALUVVNoVm) is used.
multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M  : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">,
           Sched<[WriteVCompressV, ReadVCompressV, ReadVCompressV]>;
}
781
// Vector AMO: one record with write-back of the old value (_WD) and one
// without (_UNWD).
multiclass VAMO<RISCVAMOOP amoop, RISCVWidth width, string opcodestr> {
  def _WD : VAMOWd<amoop, width, opcodestr>;
  def _UNWD : VAMONoWd<amoop, width, opcodestr>;
}
786
// Whole-register loads: one record per element width (vlNre8/16/32/64.v).
// nf holds NFIELDS-1 (callers below pass 0/1/3/7 for the 1/2/4/8-register
// forms); the matching SchedWrite is looked up by name as WriteVLD<N>R<l>.
multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> {
  foreach l = [8, 16, 32, 64] in {
    defvar w = !cast<RISCVWidth>("LSWidth" # l);
    defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R" # l);

    def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>,
                     Sched<[s, ReadVLDX]>;
  }
}
796
797//===----------------------------------------------------------------------===//
798// Instructions
799//===----------------------------------------------------------------------===//
800
let Predicates = [HasStdExtV] in {
// Configuration-setting instructions. Marked hasSideEffects = 1 because they
// update the vector configuration state (vl/vtype) rather than a modelled
// register operand.
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">;

// vsetivli takes the AVL as a 5-bit immediate instead of a register.
def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp:$vtypei),
                             "vsetivli", "$rd, $uimm, $vtypei">;

// vsetvl takes the vtype value from a register (rs2).
def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
// Define the unit-stride, fault-only-first, strided and indexed loads/stores
// once per element width (EEW) of 8/16/32/64 bits.
foreach eew = [8, 16, 32, 64] in {
  defvar w = !cast<RISCVWidth>("LSWidth" # eew);

  // Vector Unit-Stride Instructions
  def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESched<eew>;
  def VSE#eew#_V  : VUnitStrideStore<w,  "vse"#eew#".v">, VSESched<eew>;

  // Vector Unit-Stride Fault-only-First Loads
  def VLE#eew#FF_V : VUnitStrideLoadFF<w,  "vle"#eew#"ff.v">, VLFSched<eew>;

  // Vector Strided Instructions
  def VLSE#eew#_V  : VStridedLoad<w,  "vlse"#eew#".v">, VLSSched<eew>;
  def VSSE#eew#_V  : VStridedStore<w,  "vsse"#eew#".v">, VSSSched<eew>;

  // Vector Indexed Instructions (unordered "u" and ordered "o" variants).
  def VLUXEI#eew#_V :
    VIndexedLoad<MOPLDIndexedUnord, w, "vluxei"#eew#".v">, VLXSched<eew, "U">;
  def VLOXEI#eew#_V :
    VIndexedLoad<MOPLDIndexedOrder, w, "vloxei"#eew#".v">, VLXSched<eew, "O">;
  def VSUXEI#eew#_V :
    VIndexedStore<MOPSTIndexedUnord, w, "vsuxei"#eew#".v">, VSXSched<eew, "U">;
  def VSOXEI#eew#_V :
    VIndexedStore<MOPSTIndexedOrder, w, "vsoxei"#eew#".v">, VSXSched<eew, "O">;
}
836
// Unit-stride mask load/store. vle1.v/vse1.v are the older spellings of
// vlm.v/vsm.v and are kept as (non-preferred, hence the trailing 0) aliases.
def VLM_V : VUnitStrideLoadMask<"vlm.v">,
             Sched<[WriteVLDM, ReadVLDX]>;
def VSM_V : VUnitStrideStoreMask<"vsm.v">,
             Sched<[WriteVSTM, ReadVSTM, ReadVSTX]>;
def : InstAlias<"vle1.v $vd, (${rs1})",
                (VLM_V VR:$vd, GPR:$rs1), 0>;
def : InstAlias<"vse1.v $vs3, (${rs1})",
                (VSM_V VR:$vs3, GPR:$rs1), 0>;
845
// Whole-register loads; the nf argument is NFIELDS-1 (0/1/3/7 for the
// 1/2/4/8-register groups), matching the register class width.
defm VL1R : VWholeLoadN<0, "vl1r", VR>;
defm VL2R : VWholeLoadN<1, "vl2r", VRM2>;
defm VL4R : VWholeLoadN<3, "vl4r", VRM4>;
defm VL8R : VWholeLoadN<7, "vl8r", VRM8>;

// vlNr.v is an accepted spelling of vlNre8.v.
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;

// Whole-register stores (no per-EEW variants; stores are EEW-agnostic).
def VS1R_V : VWholeStore<0, "vs1r.v", VR>,
             Sched<[WriteVST1R, ReadVST1R, ReadVSTX]>;
def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>,
             Sched<[WriteVST2R, ReadVST2R, ReadVSTX]>;
def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>,
             Sched<[WriteVST4R, ReadVST4R, ReadVSTX]>;
def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>,
             Sched<[WriteVST8R, ReadVST8R, ReadVSTX]>;
864
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
// vrsub (reverse subtract) has no .vv form: vrsub.vv would be vsub.vv with
// swapped operands.
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

// vneg.v vd, vs == vrsub.vx vd, vs, x0  (i.e. 0 - vs).
def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
// .w forms: the vs2 operand is already the wide element width.
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// Widening sign/zero-extending converts expressed as widening add of zero.
def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
900
// Vector Integer Extension. All share funct6 0b010010; the extension kind and
// factor are selected by the vs1 field value (second bits<5> argument).
defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;

// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
// vmadc/vmsbc write a mask result; both the carry-in (VALUm) and no-carry-in
// (VALUNoVm) encodings are defined under the same defm prefix.
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

// vnot.v vd, vs == vxor.vi vd, vs, -1.
def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
928
// Vector Single-Width Bit Shift Instructions. Shift amounts are unsigned, so
// the immediate form uses uimm5 rather than the default simm5.
defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101, uimm5>;
defm VSRL_V : VSHT_IV_V_X_I<"vsrl", 0b101000, uimm5>;
defm VSRA_V : VSHT_IV_V_X_I<"vsra", 0b101001, uimm5>;

// Vector Narrowing Integer Right Shift Instructions
// Refer to 11.3. Narrowing Vector Arithmetic Instructions
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VNSHT_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">;
defm VNSRA_W : VNSHT_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"

// Narrowing convert expressed as a narrowing shift right by zero.
def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Integer Comparison Instructions
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VCMP_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VCMP_IV_V_X_I<"vmsne", 0b011001>;
// vmslt/vmsltu have no immediate form; vmsgt/vmsgtu have no .vv form.
defm VMSLTU_V : VCMP_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VCMP_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VCMP_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VCMP_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VCMP_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VCMP_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint

// .vv spellings of the missing gt/ge comparisons, provided by swapping the
// operands of the lt/le instructions.
def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
967
// Assembler-only pseudos (isAsmParserOnly = 1) that expand the ge/lt
// immediate comparisons the hardware lacks; expansion happens in the
// assembler, not in codegen.
let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons we need to special case 0 immediate to maintain
// the always true/false semantics we would invert if we just decremented the
// immediate like we do for signed. To match the GNU assembler we will use
// vmseq/vmsne.vv with the same register for both operands which we can't do
// from an InstAlias.
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle signed with pseudos as well for more consistency in the
// implementation.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
}

// vmsge(u).vx pseudos: unmasked, masked (_M, vd may not be v0), and masked
// with an explicit scratch register (_M_T) variants.
let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
}
1012
// Vector Integer Min/Max Instructions
defm VMINU_V : VCMP_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VCMP_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VCMP_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VCMP_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VMUL_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VMUL_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VMUL_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VMUL_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VDIV_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VDIV_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VDIV_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VDIV_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VWMUL_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VWMUL_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VWMUL_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VMAC_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VMAC_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VMAC_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VMAC_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VWMAC_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VWMAC_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VWMAC_MV_V_X<"vwmaccsu", 0b111111>;
// vwmaccus has a .vx form only (scalar is the unsigned operand).
defm VWMACCUS_V : VWMAC_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
1051
// Vector Integer Merge Instructions
defm VMERGE_V : VMRG_IV_V_X_I<"vmerge", 0b010111>;

// Vector Integer Move Instructions. vmv.v.* shares funct6 0b010111 with
// vmerge; the unmasked (vm = 1) encoding with vs2 = 0 selects the move.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint  in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">,
              Sched<[WriteVIMovV, ReadVIMovV]>;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">,
              Sched<[WriteVIMovX, ReadVIMovX]>;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                       (ins simm5:$imm), "vmv.v.i", "$vd, $imm">,
              Sched<[WriteVIMovI]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VSALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VSALU_IV_V_X_I<"vsadd", 0b100001>;
// Saturating subtract has no immediate form.
defm VSSUBU_V : VSALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VSALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VAALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VAALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VAALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VAALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VSMUL_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions (unsigned shift amounts).
defm VSSRL_V : VSSHF_IV_V_X_I<"vssrl", 0b101010, uimm5>;
defm VSSRA_V : VSSHF_IV_V_X_I<"vssra", 0b101011, uimm5>;

// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VNCLP_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">;
defm VNCLIP_W : VNCLP_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasStdExtV]
1097
// Floating-point vector instructions additionally require the scalar F
// extension (the .vf forms read an FPR scalar).
let Predicates = [HasStdExtV, HasStdExtF] in {
// Vector Single-Width Floating-Point Add/Subtract Instructions
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
// vfrsub (reverse subtract) has a .vf form only.
defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;

// Vector Widening Floating-Point Add/Subtract Instructions
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VFWADD_V : VWALU_FV_V_F<"vfwadd", 0b110000>;
defm VFWSUB_V : VWALU_FV_V_F<"vfwsub", 0b110010>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VFWADD_W : VWALU_FV_V_F<"vfwadd", 0b110100, "w">;
defm VFWSUB_W : VWALU_FV_V_F<"vfwsub", 0b110110, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// Vector Single-Width Floating-Point Multiply/Divide Instructions
defm VFMUL_V : VMUL_FV_V_F<"vfmul", 0b100100>;
defm VFDIV_V : VDIV_FV_V_F<"vfdiv", 0b100000>;
// vfrdiv (reverse divide) has a .vf form only.
defm VFRDIV_V : VRDIV_FV_F<"vfrdiv", 0b100001>;

// Vector Widening Floating-Point Multiply
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMUL_V : VWMUL_FV_V_F<"vfwmul", 0b111000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
defm VFMACC_V : VMAC_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VMAC_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VMAC_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VMAC_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VMAC_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VMAC_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VMAC_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VMAC_FV_V_F<"vfnmsub", 0b101011>;

// Vector Widening Floating-Point Fused Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMACC_V : VWMAC_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VWMAC_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VWMAC_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VWMAC_FV_V_F<"vfwnmsac", 0b111111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
1147
// Vector Floating-Point Square-Root Instruction and the 7-bit-accurate
// reciprocal square-root / reciprocal estimates; all share funct6 0b010011
// and are selected by the vs1 field value.
defm VFSQRT_V : VSQR_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
defm VFRSQRT7_V : VRCP_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;
defm VFREC7_V : VRCP_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;

// Vector Floating-Point MIN/MAX Instructions
defm VFMIN_V : VCMP_FV_V_F<"vfmin", 0b000100>;
defm VFMAX_V : VCMP_FV_V_F<"vfmax", 0b000110>;

// Vector Floating-Point Sign-Injection Instructions
defm VFSGNJ_V : VSGNJ_FV_V_F<"vfsgnj", 0b001000>;
defm VFSGNJN_V : VSGNJ_FV_V_F<"vfsgnjn", 0b001001>;
defm VFSGNJX_V : VSGNJ_FV_V_F<"vfsgnjx", 0b001010>;

// vfneg/vfabs are sign-injection with both sources equal.
def : InstAlias<"vfneg.v $vd, $vs$vm",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfabs.v $vd, $vs$vm",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;

// Vector Floating-Point Compare Instructions. vmfgt/vmfge have no .vv form;
// the aliases below provide it by swapping the vmflt/vmfle operands.
let RVVConstraint = NoConstraint in {
defm VMFEQ_V : VCMP_FV_V_F<"vmfeq", 0b011000>;
defm VMFNE_V : VCMP_FV_V_F<"vmfne", 0b011100>;
defm VMFLT_V : VCMP_FV_V_F<"vmflt", 0b011011>;
defm VMFLE_V : VCMP_FV_V_F<"vmfle", 0b011001>;
defm VMFGT_V : VCMP_FV_F<"vmfgt", 0b011101>;
defm VMFGE_V : VCMP_FV_F<"vmfge", 0b011111>;
} // RVVConstraint = NoConstraint

def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
                (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
                (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

// Vector Floating-Point Classify Instruction
defm VFCLASS_V : VCLS_FV_VS2<"vfclass.v", 0b010011, 0b10000>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

// Vector Floating-Point Merge Instruction: always masked by v0 (vm = 0 in
// the encoding; v0 is a fixed operand in the assembly string).
let vm = 0 in
def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                           (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0">,
                  Sched<[WriteVFMergeV, ReadVFMergeV, ReadVFMergeF, ReadVMask]>;

// Vector Floating-Point Move Instruction: unmasked (vm = 1) encoding of the
// same funct6 with vs2 = 0, broadcasting the FP scalar.
let RVVConstraint = NoConstraint in
let vm = 1, vs2 = 0 in
def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                       (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1">,
               Sched<[WriteVFMovV, ReadVFMovF]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
1202
// Single-Width Floating-Point/Integer Type-Convert Instructions. All conversion
// instructions share funct6 0b010010; the vs1 field value selects the
// conversion (rtz variants use the round-towards-zero encodings).
defm VFCVT_XU_F_V : VCVTI_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
defm VFCVT_X_F_V : VCVTI_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
defm VFCVT_RTZ_XU_F_V : VCVTI_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
defm VFCVT_RTZ_X_F_V : VCVTI_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
defm VFCVT_F_XU_V : VCVTF_IV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
defm VFCVT_F_X_V : VCVTF_IV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;

// Widening Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in {
defm VFWCVT_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
defm VFWCVT_X_F_V : VWCVTI_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
defm VFWCVT_RTZ_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
defm VFWCVT_RTZ_X_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
defm VFWCVT_F_XU_V : VWCVTF_IV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
defm VFWCVT_F_X_V : VWCVTF_IV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
defm VFWCVT_F_F_V : VWCVTF_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt

// Narrowing Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd" in {
defm VFNCVT_XU_F_W : VNCVTI_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
defm VFNCVT_X_F_W : VNCVTI_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
defm VFNCVT_RTZ_XU_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
defm VFNCVT_RTZ_X_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
defm VFNCVT_F_XU_W : VNCVTF_IV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasStdExtV, HasStdExtF]
1234
let Predicates = [HasStdExtV] in {

// Vector Single-Width Integer Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VREDSUM : VRED_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VRED_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX : VRED_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VRED_MV_V<"vredminu", 0b000100>;
defm VREDMIN : VRED_MV_V<"vredmin", 0b000101>;
defm VREDAND : VRED_MV_V<"vredand", 0b000001>;
defm VREDOR : VRED_MV_V<"vredor", 0b000010>;
defm VREDXOR : VRED_MV_V<"vredxor", 0b000011>;
} // RVVConstraint = NoConstraint

// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VWREDSUMU : VWRED_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VWRED_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

} // Predicates = [HasStdExtV]
1260
let Predicates = [HasStdExtV, HasStdExtF] in {
// Vector Single-Width Floating-Point Reduction Instructions. The vfredsum /
// vfwredsum spellings predate the ordered/unordered renaming and are kept as
// non-preferred aliases of the unordered (usum) forms.
let RVVConstraint = NoConstraint in {
defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>;
defm VFREDUSUM : VRED_FV_V<"vfredusum", 0b000001>;
defm VFREDMAX : VRED_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VRED_FV_V<"vfredmin", 0b000101>;
} // RVVConstraint = NoConstraint

def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm",
                (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;

// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VFWREDOSUM : VWREDO_FV_V<"vfwredosum", 0b110011>;
defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm",
                (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
} // Predicates = [HasStdExtV, HasStdExtF]
1286
let Predicates = [HasStdExtV] in {
// Vector Mask-Register Logical Instructions
let RVVConstraint = NoConstraint in {
defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">;
defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">;
defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">;
}

// Mask copy/clear/set/complement idioms built from the logical ops above.
def : InstAlias<"vmmv.m $vd, $vs",
                (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;

// Old pre-renaming spellings of vmandn/vmorn, kept as non-preferred aliases.
def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1",
                (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
def : InstAlias<"vmornot.mm $vd, $vs2, $vs1",
                (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
1313
// Mask-to-scalar queries: both write a GPR result rather than a vector.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint  in {

// Vector mask population count vcpop
def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2, VMaskOp:$vm),
                      "vcpop.m", "$vd, $vs2$vm">,
              Sched<[WriteVMPopV, ReadVMPopV, ReadVMask]>;

// vfirst find-first-set mask bit
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                       (ins VR:$vs2, VMaskOp:$vm),
                       "vfirst.m", "$vd, $vs2$vm">,
              Sched<[WriteVMFFSV, ReadVMFFSV, ReadVMask]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// vpopc.m is the old spelling of vcpop.m, kept as a non-preferred alias.
def : InstAlias<"vpopc.m $vd, $vs2$vm",
                (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>;
1333
// Set-before/including/only-first and iota: all share funct6 0b010100 and
// are selected by the vs1 field value.
let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {

// vmsbf.m set-before-first mask bit
defm VMSBF_M : VMSFS_MV_V<"vmsbf.m", 0b010100, 0b00001>;
// vmsif.m set-including-first mask bit
defm VMSIF_M : VMSFS_MV_V<"vmsif.m", 0b010100, 0b00011>;
// vmsof.m set-only-first mask bit
defm VMSOF_M : VMSFS_MV_V<"vmsof.m", 0b010100, 0b00010>;
// Vector Iota Instruction
defm VIOTA_M : VMIOT_MV_V<"viota.m", 0b010100, 0b10000>;

} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota
1346
// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

// vid.v has no vector source; vs2 is hardwired to 0 in the encoding.
let vs2 = 0 in
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
                    (ins VMaskOp:$vm), "vid.v", "$vd$vm">,
            Sched<[WriteVMIdxV, ReadVMask]>;

// Integer Scalar Move Instructions (always unmasked: vm = 1).
let vm = 1, RVVConstraint = NoConstraint in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">,
              Sched<[WriteVIMovVX, ReadVIMovVX]>;
// vmv.s.x only writes element 0, so vd is modelled as tied read-modify-write.
let Constraints = "$vd = $vd_wb" in
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
                      (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">,
              Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>;
}

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

} // Predicates = [HasStdExtV]
1369
let Predicates = [HasStdExtV, HasStdExtF] in {

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
    RVVConstraint = NoConstraint  in {
// Floating-Point Scalar Move Instructions
// Always unmasked (vm = 1); they move between element 0 of a vector
// register and a scalar FP register.
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                      (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">,
               Sched<[WriteVFMovVF, ReadVFMovVF]>;
// vfmv.s.f writes only element 0; the tied $vd input ($vd = $vd_wb) models
// the rest of the destination register being preserved.
let Constraints = "$vd = $vd_wb" in
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
                       (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">,
               Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1

} // Predicates = [HasStdExtV, HasStdExtF]
1386
let Predicates = [HasStdExtV] in {
// Vector Slide Instructions
// Only the slide-up forms carry an overlap constraint: the destination may
// not overlap the source group (earlyclobber + SlideUp). The slide-down
// forms have no such restriction.
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110, uimm5>;
defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111, uimm5>;
defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasStdExtV]
1396
let Predicates = [HasStdExtV, HasStdExtF] in {
// FP variants of vslide1up/vslide1down (scalar operand comes from an FP
// register). As with the integer slides, only the "up" form needs the
// destination/source overlap constraint.
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VSLD1_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VSLD1_FV_F<"vfslide1down", 0b001111>;
} // Predicates = [HasStdExtV, HasStdExtF]
1403
let Predicates = [HasStdExtV] in {
// Vector Register Gather Instruction
// Gathers read arbitrary source elements, so the destination may not
// overlap either source operand (earlyclobber + Vrgather constraint).
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100, uimm5>;
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">,
                      Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV]>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather

// Vector Compress Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VCPR_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress

// Whole Vector Register Move: vmv1r.v/vmv2r.v/vmv4r.v/vmv8r.v.
// The register count is encoded as n-1 (!add(n, -1)) in the instruction.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
foreach n = [1, 2, 4, 8] in {
  def VMV#n#R_V  : RVInstV<0b100111, !add(n, -1), OPIVI, (outs VR:$vd),
                           (ins VR:$vs2), "vmv" # n # "r.v", "$vd, $vs2">,
                   VMVRSched<n> {
  // Unmasked (vm = 1); Uses = [] drops implicit uses inherited from the
  // base class — presumably because these moves ignore vtype/vl; confirm
  // against RVInstV's Uses list.
  let Uses = [];
  let vm = 1;
}
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, RVVConstraint = NoConstraint
} // Predicates = [HasStdExtV]
1429
// Segment loads/stores (Zvlsseg): for every field count nf in 2..8 and
// every element width eew, define unit-stride (plus fault-only-first),
// strided, and indexed (ordered/unordered) variants. The nf field is
// encoded as nf-1 (!add(nf, -1)).
let Predicates = [HasStdExtZvlsseg] in {
  foreach nf=2-8 in {
    foreach eew = [8, 16, 32, 64] in {
      // Look up the width record (LSWidth8/16/32/64) by name.
      defvar w = !cast<RISCVWidth>("LSWidth"#eew);

      // Vector Unit-Stride Instructions (incl. fault-only-first load)
      def VLSEG#nf#E#eew#_V :
        VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">;
      def VLSEG#nf#E#eew#FF_V :
        VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">;
      def VSSEG#nf#E#eew#_V :
        VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">;

      // Vector Strided Instructions
      def VLSSEG#nf#E#eew#_V :
        VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">;
      def VSSSEG#nf#E#eew#_V :
        VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">;

      // Vector Indexed Instructions (unordered "ux" / ordered "ox")
      def VLUXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
                            "vluxseg"#nf#"ei"#eew#".v">;
      def VLOXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
                            "vloxseg"#nf#"ei"#eew#".v">;
      def VSUXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
                             "vsuxseg"#nf#"ei"#eew#".v">;
      def VSOXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
                             "vsoxseg"#nf#"ei"#eew#".v">;
    }
  }
} // Predicates = [HasStdExtZvlsseg]
1464
// Vector AMO instructions (Zvamo), which also require the scalar A
// extension. One set per index element width (EI8/EI16/EI32); the EI64
// variants are RV64-only and defined separately below.
let Predicates = [HasStdExtZvamo, HasStdExtA] in {
  foreach eew = [8, 16, 32] in {
    // Look up the width record (LSWidth8/16/32) by name.
    defvar w = !cast<RISCVWidth>("LSWidth"#eew);
    defm VAMOSWAPEI#eew : VAMO<AMOOPVamoSwap, w, "vamoswapei"#eew#".v">;
    defm VAMOADDEI#eew : VAMO<AMOOPVamoAdd, w, "vamoaddei"#eew#".v">;
    defm VAMOXOREI#eew : VAMO<AMOOPVamoXor, w, "vamoxorei"#eew#".v">;
    defm VAMOANDEI#eew : VAMO<AMOOPVamoAnd, w, "vamoandei"#eew#".v">;
    defm VAMOOREI#eew : VAMO<AMOOPVamoOr, w, "vamoorei"#eew#".v">;
    defm VAMOMINEI#eew : VAMO<AMOOPVamoMin, w, "vamominei"#eew#".v">;
    defm VAMOMAXEI#eew : VAMO<AMOOPVamoMax, w, "vamomaxei"#eew#".v">;
    defm VAMOMINUEI#eew : VAMO<AMOOPVamoMinu, w, "vamominuei"#eew#".v">;
    defm VAMOMAXUEI#eew : VAMO<AMOOPVamoMaxu, w, "vamomaxuei"#eew#".v">;
  }
} // Predicates = [HasStdExtZvamo, HasStdExtA]
1479
// EI64-indexed vector AMOs: same operation set as above, but gated on
// IsRV64 in addition to Zvamo + A.
let Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64] in {
  defm VAMOSWAPEI64 : VAMO<AMOOPVamoSwap, LSWidth64, "vamoswapei64.v">;
  defm VAMOADDEI64 : VAMO<AMOOPVamoAdd, LSWidth64, "vamoaddei64.v">;
  defm VAMOXOREI64 : VAMO<AMOOPVamoXor, LSWidth64, "vamoxorei64.v">;
  defm VAMOANDEI64 : VAMO<AMOOPVamoAnd, LSWidth64, "vamoandei64.v">;
  defm VAMOOREI64 : VAMO<AMOOPVamoOr, LSWidth64, "vamoorei64.v">;
  defm VAMOMINEI64 : VAMO<AMOOPVamoMin, LSWidth64, "vamominei64.v">;
  defm VAMOMAXEI64 : VAMO<AMOOPVamoMax, LSWidth64, "vamomaxei64.v">;
  defm VAMOMINUEI64 : VAMO<AMOOPVamoMinu, LSWidth64, "vamominuei64.v">;
  defm VAMOMAXUEI64 : VAMO<AMOOPVamoMaxu, LSWidth64, "vamomaxuei64.v">;
} // Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64]
1491
1492include "RISCVInstrInfoVPseudos.td"
1493