//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file describes the RISC-V instructions from the standard 'V' Vector
/// extension, version 0.10.
/// This version is still experimental as the 'V' extension hasn't been
/// ratified yet.
///
//===----------------------------------------------------------------------===//

include "RISCVInstrFormatsV.td"

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

def VTypeIAsmOperand : AsmOperandClass {
  let Name = "VTypeI";
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
}

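// The vtype immediate operand of vsetvli/vsetivli. In v0.10 this packs the
// vlmul, vsew, vta and vma fields into the low bits of the instruction; see
// the V specification for the exact bit layout.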
def VTypeIOp : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<11>";
}

def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

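// The optional mask operand. Only v0 may be used as a mask register
// (PredicateMethod isV0Reg); when present, the operand prints as the
// trailing ", v0.t" suffix on masked instructions.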
def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}

def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

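// Accepts immediates in [-15, 16] rather than the usual [-16, 15]. This
// operand is used by the vmsge{u} pseudo-instructions below, whose expansion
// (performed in the assembler, outside this file) is expected to decrement
// the immediate by one, hence the shifted range.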
def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
  [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
    return MCOp.isBareSymbolRef();
  }];
}

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// load vd, (rs1)
class VUnitStrideLoadMask<string opcodestr>
    : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask,
                LSWidth8.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1), opcodestr, "$vd, (${rs1})"> {
  let vm = 1;
  let RVVConstraint = NoConstraint;
}

// load vd, (rs1), vm
class VUnitStrideLoad<RISCVLSUMOP lumop, RISCVWidth width,
                      string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, lumop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;

// vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr,
                 RegisterClass VRC>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VRC:$vd), (ins GPR:$rs1),
                opcodestr, "$vd, (${rs1})"> {
  let vm = 1;
  let Uses = [];
  let RVVConstraint = NoConstraint;
}

// segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVLSUMOP lumop,
                             RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, lumop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0

let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// store vs3, (rs1)
class VUnitStrideStoreMask<string opcodestr>
    : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask,
                LSWidth8.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1), opcodestr,
                "$vs3, (${rs1})"> {
  let vm = 1;
}

// store vs3, (rs1), vm
class VUnitStrideStore<RISCVLSUMOP sumop, RISCVWidth width,
                       string opcodestr>
    : RVInstVSU<0b000, width.Value{3}, sumop, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// store vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// store vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;

// vs<nf>r.v vs3, (rs1)
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
                0b000, (outs), (ins VRC:$vs3, GPR:$rs1),
                opcodestr, "$vs3, (${rs1})"> {
  let vm = 1;
  let Uses = [];
}

// segment store vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// segment store vs3, (rs1), rs2, vm
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// segment store vs3, (rs1), vs2, vm
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $vs1$vm">;

// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $vs1, v0"> {
  let vm = 0;
}

// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $vs1, $vs2$vm">;

// op vd, vs2, vs1
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, rs1
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}

// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;

// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}

// op vd, vs2, imm
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, vm (the vs1 field is used as part of the instruction encoding)
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
               (ins VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in {
// vamo vd, (rs1), vs2, vd, vm
class VAMOWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
    : RVInstVAMO<amoop, width.Value{2-0}, (outs VR:$vd_wd),
            (ins GPR:$rs1, VR:$vs2, VR:$vd, VMaskOp:$vm),
            opcodestr, "$vd_wd, (${rs1}), $vs2, $vd$vm"> {
    let Constraints = "$vd_wd = $vd";
    let wd = 1;
    bits<5> vd;
    let Inst{11-7} = vd;
}

// vamo x0, (rs1), vs2, vs3, vm
class VAMONoWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
    : RVInstVAMO<amoop, width.Value{2-0}, (outs),
            (ins GPR:$rs1, VR:$vs2, VR:$vs3, VMaskOp:$vm),
            opcodestr, "x0, (${rs1}), $vs2, $vs3$vm"> {
    bits<5> vs3;
    let Inst{11-7} = vs3;
}

} // hasSideEffects = 0, mayLoad = 1, mayStore = 1

//===----------------------------------------------------------------------===//
// Combination of instruction classes.
// Use these multiclasses to define instructions more easily.
//===----------------------------------------------------------------------===//
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6,
                         Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>;
}

multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

multiclass VALUr_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X : VALUrVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6,
                       Operand optype = simm5, string vw = "v"> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>;
}

multiclass VALU_IV_V<string opcodestr, bits<6> funct6> {
  def _VS  : VALUVV<funct6, OPIVV, opcodestr # ".vs">;
}

multiclass VALUr_IV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">;
}

multiclass VALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">;
}

multiclass VALU_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALUr_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALUr_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>;
}

multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">;
  def IM : VALUmVI<funct6, opcodestr # ".vim">;
}

multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">;
}

multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6,
                             Operand optype = simm5> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">;
  def I : VALUVINoVm<funct6, opcodestr # ".vi", optype>;
}

multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">;
}

multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALU_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALUr_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALU_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">;
}

multiclass VALU_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>;
}

multiclass VAMO<RISCVAMOOP amoop, RISCVWidth width, string opcodestr> {
  def _WD : VAMOWd<amoop, width, opcodestr>;
  def _UNWD : VAMONoWd<amoop, width, opcodestr>;
}

multiclass VWholeLoad<bits<3> nf, string opcodestr, RegisterClass VRC> {
  def E8_V : VWholeLoad<nf, LSWidth8, opcodestr # "e8.v", VRC>;
  def E16_V : VWholeLoad<nf, LSWidth16, opcodestr # "e16.v", VRC>;
  def E32_V : VWholeLoad<nf, LSWidth32, opcodestr # "e32.v", VRC>;
  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>;
}

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtV] in {
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">;

def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp:$vtypei),
                             "vsetivli", "$rd, $uimm, $vtypei">;

def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0

// Vector Unit-Stride Instructions
def VLE8_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth8, "vle8.v">;
def VLE16_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth16, "vle16.v">;
def VLE32_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth32, "vle32.v">;
def VLE64_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth64, "vle64.v">;

def VLE8FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth8, "vle8ff.v">;
def VLE16FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth16, "vle16ff.v">;
def VLE32FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth32, "vle32ff.v">;
def VLE64FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth64, "vle64ff.v">;

def VLE1_V : VUnitStrideLoadMask<"vle1.v">;
def VSE1_V : VUnitStrideStoreMask<"vse1.v">;

def VSE8_V : VUnitStrideStore<SUMOPUnitStride, LSWidth8, "vse8.v">;
def VSE16_V : VUnitStrideStore<SUMOPUnitStride, LSWidth16, "vse16.v">;
def VSE32_V : VUnitStrideStore<SUMOPUnitStride, LSWidth32, "vse32.v">;
def VSE64_V : VUnitStrideStore<SUMOPUnitStride, LSWidth64, "vse64.v">;

// Vector Strided Instructions
def VLSE8_V : VStridedLoad<LSWidth8, "vlse8.v">;
def VLSE16_V : VStridedLoad<LSWidth16, "vlse16.v">;
def VLSE32_V : VStridedLoad<LSWidth32, "vlse32.v">;
def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">;

def VSSE8_V : VStridedStore<LSWidth8, "vsse8.v">;
def VSSE16_V : VStridedStore<LSWidth16, "vsse16.v">;
def VSSE32_V : VStridedStore<LSWidth32, "vsse32.v">;
def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">;

// Vector Indexed Instructions
def VLUXEI8_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth8, "vluxei8.v">;
def VLUXEI16_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth16, "vluxei16.v">;
def VLUXEI32_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth32, "vluxei32.v">;
def VLUXEI64_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth64, "vluxei64.v">;

def VLOXEI8_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth8, "vloxei8.v">;
def VLOXEI16_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth16, "vloxei16.v">;
def VLOXEI32_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth32, "vloxei32.v">;
def VLOXEI64_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth64, "vloxei64.v">;

def VSUXEI8_V : VIndexedStore<MOPSTIndexedUnord, LSWidth8, "vsuxei8.v">;
def VSUXEI16_V : VIndexedStore<MOPSTIndexedUnord, LSWidth16, "vsuxei16.v">;
def VSUXEI32_V : VIndexedStore<MOPSTIndexedUnord, LSWidth32, "vsuxei32.v">;
def VSUXEI64_V : VIndexedStore<MOPSTIndexedUnord, LSWidth64, "vsuxei64.v">;

def VSOXEI8_V : VIndexedStore<MOPSTIndexedOrder, LSWidth8, "vsoxei8.v">;
def VSOXEI16_V : VIndexedStore<MOPSTIndexedOrder, LSWidth16, "vsoxei16.v">;
def VSOXEI32_V : VIndexedStore<MOPSTIndexedOrder, LSWidth32, "vsoxei32.v">;
def VSOXEI64_V : VIndexedStore<MOPSTIndexedOrder, LSWidth64, "vsoxei64.v">;

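// Whole register loads encode the register count in the nf field as
// count - 1, so 1/2/4/8 registers use nf values 0/1/3/7; the same holds for
// the whole register stores below.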
defm VL1R : VWholeLoad<0, "vl1r", VR>;
defm VL2R : VWholeLoad<1, "vl2r", VRM2>;
defm VL4R : VWholeLoad<3, "vl4r", VRM4>;
defm VL8R : VWholeLoad<7, "vl8r", VRM8>;
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;

def VS1R_V : VWholeStore<0, "vs1r.v", VR>;
def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>;
def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>;
def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>;

// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

def : InstAlias<"vneg.v $vd, $vs$vm",
                (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
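// For example, with SEW=32 and LMUL=1, "vwadd.vv v4, v6, v7" writes a
// 2*SEW-wide group v4-v5, which must therefore not overlap v6 or v7.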
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Integer Extension
defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;

// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;

// Vector Single-Width Bit Shift Instructions
defm VSLL_V : VALU_IV_V_X_I<"vsll", 0b100101, uimm5>;
defm VSRL_V : VALU_IV_V_X_I<"vsrl", 0b101000, uimm5>;
defm VSRA_V : VALU_IV_V_X_I<"vsra", 0b101001, uimm5>;

// Vector Narrowing Integer Right Shift Instructions
// Refer to 11.3. Narrowing Vector Arithmetic Instructions
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
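// For example, with LMUL=1, "vnsrl.wi v4, v6, 2" reads the 2*SEW-wide source
// group v6-v7 and writes the SEW-wide v4, so v4 may not be v6 or v7.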
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VALU_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">;
defm VNSRA_W : VALU_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"

def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Integer Comparison Instructions
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VALU_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VALU_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VALU_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VALU_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VALU_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VALU_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VALU_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VALU_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint

def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons we need to special-case an immediate of 0 to keep
// the always-true/always-false semantics, which we would otherwise invert by
// decrementing the immediate as we do for signed comparisons. To match the
// GNU assembler we use vmseq/vmsne.vv with the same register for both
// operands, which we can't do from an InstAlias.
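// For example, "vmsgeu.vi vd, va, 0" is always true, so it is emitted as
// "vmseq.vv vd, va, va"; a nonzero immediate is expected to be emitted as
// "vmsgtu.vi" with the immediate decremented by one.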
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle signed with pseudos as well for more consistency in the
// implementation.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
}

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
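// These pseudos cover the unmasked, masked, and masked-needing-scratch forms
// of vmsge{u}.vx. The expansions (performed in the assembler, outside this
// file) are expected to be roughly:
//   vmsge{u}.vx vd, va, x           -> vmslt{u}.vx vd, va, x;
//                                      vmnand.mm vd, vd, vd
//   vmsge{u}.vx vd, va, x, v0.t     -> vmslt{u}.vx vd, va, x, v0.t;
//                                      vmxor.mm vd, vd, v0
//   vmsge{u}.vx vd, va, x, v0.t, vt -> vmslt{u}.vx vt, va, x;
//                                      vmandnot.mm vd, vd, vt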
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
}

// Vector Integer Min/Max Instructions
defm VMINU_V : VALU_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VALU_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VALU_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VALU_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VALU_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VALU_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VALU_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VALU_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VALU_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VALU_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VALU_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VALU_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VALU_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VALU_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VALU_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VALUr_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VALUr_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VALUr_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VALUr_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VALUr_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VALUr_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VALUr_MV_V_X<"vwmaccsu", 0b111111>;
defm VWMACCUS_V : VALUr_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Integer Merge Instructions
defm VMERGE_V : VALUm_IV_V_X_I<"vmerge", 0b010111>;

// Vector Integer Move Instructions
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                       (ins simm5:$imm), "vmv.v.i", "$vd, $imm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VALU_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VALU_IV_V_X_I<"vssrl", 0b101010, uimm5>;
defm VSSRA_V : VALU_IV_V_X_I<"vssra", 0b101011, uimm5>;

// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VALU_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">;
defm VNCLIP_W : VALU_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
// Vector Single-Width Floating-Point Add/Subtract Instructions
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;

// Vector Widening Floating-Point Add/Subtract Instructions
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VFWADD_V : VALU_FV_V_F<"vfwadd", 0b110000>;
defm VFWSUB_V : VALU_FV_V_F<"vfwsub", 0b110010>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VFWADD_W : VALU_FV_V_F<"vfwadd", 0b110100, "w">;
defm VFWSUB_W : VALU_FV_V_F<"vfwsub", 0b110110, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// Vector Single-Width Floating-Point Multiply/Divide Instructions
defm VFMUL_V : VALU_FV_V_F<"vfmul", 0b100100>;
defm VFDIV_V : VALU_FV_V_F<"vfdiv", 0b100000>;
defm VFRDIV_V : VALU_FV_F<"vfrdiv", 0b100001>;

// Vector Widening Floating-Point Multiply
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMUL_V : VALU_FV_V_F<"vfwmul", 0b111000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
defm VFMACC_V : VALUr_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VALUr_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VALUr_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VALUr_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VALUr_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VALUr_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VALUr_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VALUr_FV_V_F<"vfnmsub", 0b101011>;

// Vector Widening Floating-Point Fused Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMACC_V : VALUr_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VALUr_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VALUr_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VALUr_FV_V_F<"vfwnmsac", 0b111111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Floating-Point Square-Root Instruction
defm VFSQRT_V : VALU_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
defm VFRSQRT7_V : VALU_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;
defm VFREC7_V : VALU_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;

// Vector Floating-Point MIN/MAX Instructions
defm VFMIN_V : VALU_FV_V_F<"vfmin", 0b000100>;
defm VFMAX_V : VALU_FV_V_F<"vfmax", 0b000110>;

// Vector Floating-Point Sign-Injection Instructions
defm VFSGNJ_V : VALU_FV_V_F<"vfsgnj", 0b001000>;
defm VFSGNJN_V : VALU_FV_V_F<"vfsgnjn", 0b001001>;
defm VFSGNJX_V : VALU_FV_V_F<"vfsgnjx", 0b001010>;

def : InstAlias<"vfneg.v $vd, $vs$vm",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfabs.v $vd, $vs$vm",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;

// Vector Floating-Point Compare Instructions
let RVVConstraint = NoConstraint in {
defm VMFEQ_V : VALU_FV_V_F<"vmfeq", 0b011000>;
defm VMFNE_V : VALU_FV_V_F<"vmfne", 0b011100>;
defm VMFLT_V : VALU_FV_V_F<"vmflt", 0b011011>;
defm VMFLE_V : VALU_FV_V_F<"vmfle", 0b011001>;
defm VMFGT_V : VALU_FV_F<"vmfgt", 0b011101>;
defm VMFGE_V : VALU_FV_F<"vmfge", 0b011111>;
} // RVVConstraint = NoConstraint

def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
                (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
                (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

// Vector Floating-Point Classify Instruction
defm VFCLASS_V : VALU_FV_VS2<"vfclass.v", 0b010011, 0b10000>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// Vector Floating-Point Merge Instruction
def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                           (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// Vector Floating-Point Move Instruction
let RVVConstraint = NoConstraint in
def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                       (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1"> {
  let vs2 = 0;
  let vm = 1;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Single-Width Floating-Point/Integer Type-Convert Instructions
defm VFCVT_XU_F_V : VALU_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
defm VFCVT_X_F_V : VALU_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
defm VFCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
defm VFCVT_RTZ_X_F_V : VALU_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
defm VFCVT_F_XU_V : VALU_FV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
defm VFCVT_F_X_V : VALU_FV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;

// Widening Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in {
defm VFWCVT_XU_F_V : VALU_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
defm VFWCVT_X_F_V : VALU_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
defm VFWCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
defm VFWCVT_RTZ_X_F_V : VALU_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
defm VFWCVT_F_XU_V : VALU_FV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
defm VFWCVT_F_X_V : VALU_FV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
defm VFWCVT_F_F_V : VALU_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt

// Narrowing Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd" in {
defm VFNCVT_XU_F_W : VALU_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
defm VFNCVT_X_F_W : VALU_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
defm VFNCVT_RTZ_XU_F_W : VALU_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
defm VFNCVT_RTZ_X_F_W : VALU_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
defm VFNCVT_F_XU_W : VALU_FV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
defm VFNCVT_F_X_W : VALU_FV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VALU_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
defm VFNCVT_ROD_F_F_W : VALU_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Single-Width Integer Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VREDSUM : VALU_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VALU_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX : VALU_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VALU_MV_V<"vredminu", 0b000100>;
defm VREDMIN : VALU_MV_V<"vredmin", 0b000101>;
defm VREDAND : VALU_MV_V<"vredand", 0b000001>;
defm VREDOR : VALU_MV_V<"vredor", 0b000010>;
defm VREDXOR : VALU_MV_V<"vredxor", 0b000011>;
} // RVVConstraint = NoConstraint

// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VWREDSUMU : VALU_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VALU_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
// Vector Single-Width Floating-Point Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VFREDOSUM : VALU_FV_V<"vfredosum", 0b000011>;
defm VFREDSUM : VALU_FV_V<"vfredsum", 0b000001>;
defm VFREDMAX : VALU_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VALU_FV_V<"vfredmin", 0b000101>;
} // RVVConstraint = NoConstraint

// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VFWREDOSUM : VALU_FV_V<"vfwredosum", 0b110011>;
defm VFWREDSUM : VALU_FV_V<"vfwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Mask-Register Logical Instructions
let RVVConstraint = NoConstraint in {
defm VMAND_M : VALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDNOT_M : VALU_MV_Mask<"vmandnot", 0b011000, "m">;
defm VMXOR_M : VALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORNOT_M : VALU_MV_Mask<"vmornot", 0b011100, "m">;
defm VMXNOR_M : VALU_MV_Mask<"vmxnor", 0b011111, "m">;
} // RVVConstraint = NoConstraint

def : InstAlias<"vmmv.m $vd, $vs",
                (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
// Vector mask population count vpopc
def VPOPC_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                        (ins VR:$vs2, VMaskOp:$vm),
                        "vpopc.m", "$vd, $vs2$vm">;

// vfirst find-first-set mask bit
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                        (ins VR:$vs2, VMaskOp:$vm),
                        "vfirst.m", "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {
// vmsbf.m set-before-first mask bit
defm VMSBF_M : VALU_MV_VS2<"vmsbf.m", 0b010100, 0b00001>;
// vmsif.m set-including-first mask bit
defm VMSIF_M : VALU_MV_VS2<"vmsif.m", 0b010100, 0b00011>;
// vmsof.m set-only-first mask bit
defm VMSOF_M : VALU_MV_VS2<"vmsof.m", 0b010100, 0b00010>;
// Vector Iota Instruction
defm VIOTA_M : VALU_MV_VS2<"viota.m", 0b010100, 0b10000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota

// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
                      (ins VMaskOp:$vm), "vid.v", "$vd$vm"> {
  let vs2 = 0;
}

// Integer Scalar Move Instructions
let vm = 1, RVVConstraint = NoConstraint in {
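// vmv.x.s copies element 0 of vs2 to the scalar destination; vmv.s.x writes
// only element 0 of vd, so the destination is tied ($vd = $vd_wb) to model
// that the remaining elements are preserved.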
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">;
let Constraints = "$vd = $vd_wb" in
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
                      (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">;

}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// Floating-Point Scalar Move Instructions
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                      (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">;
let Constraints = "$vd = $vd_wb" in
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
                      (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Slide Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VALU_IV_X_I<"vslideup", 0b001110, uimm5>;
defm VSLIDE1UP_V : VALU_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VALU_IV_X_I<"vslidedown", 0b001111, uimm5>;
defm VSLIDE1DOWN_V : VALU_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VALU_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VALU_FV_F<"vfslide1down", 0b001111>;
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Register Gather Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VALU_IV_V_X_I<"vrgather", 0b001100, uimm5>;
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather

// Vector Compress Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VALU_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
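// Whole-register moves reuse the vs1/simm5 field to encode the register
// count as count - 1, mirroring the whole register loads and stores above.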
foreach nf = [1, 2, 4, 8] in {
  def VMV#nf#R_V  : RVInstV<0b100111, !add(nf, -1), OPIVI, (outs VR:$vd),
                            (ins VR:$vs2), "vmv" # nf # "r.v",
                            "$vd, $vs2"> {
    let Uses = [];
    let vm = 1;
  }
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtZvlsseg] in {
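// Segment instructions encode the number of fields per segment in nf as
// nf - 1, hence the !add(nf, -1) in each definition below.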
  foreach nf=2-8 in {
    def VLSEG#nf#E8_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth8, "vlseg"#nf#"e8.v">;
    def VLSEG#nf#E16_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth16, "vlseg"#nf#"e16.v">;
    def VLSEG#nf#E32_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth32, "vlseg"#nf#"e32.v">;
    def VLSEG#nf#E64_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth64, "vlseg"#nf#"e64.v">;

    def VLSEG#nf#E8FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth8, "vlseg"#nf#"e8ff.v">;
    def VLSEG#nf#E16FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth16, "vlseg"#nf#"e16ff.v">;
    def VLSEG#nf#E32FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth32, "vlseg"#nf#"e32ff.v">;
    def VLSEG#nf#E64FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth64, "vlseg"#nf#"e64ff.v">;

    def VSSEG#nf#E8_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth8, "vsseg"#nf#"e8.v">;
    def VSSEG#nf#E16_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth16, "vsseg"#nf#"e16.v">;
    def VSSEG#nf#E32_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth32, "vsseg"#nf#"e32.v">;
    def VSSEG#nf#E64_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">;

    // Vector Strided Instructions
    def VLSSEG#nf#E8_V : VStridedSegmentLoad<!add(nf, -1), LSWidth8, "vlsseg"#nf#"e8.v">;
    def VLSSEG#nf#E16_V : VStridedSegmentLoad<!add(nf, -1), LSWidth16, "vlsseg"#nf#"e16.v">;
    def VLSSEG#nf#E32_V : VStridedSegmentLoad<!add(nf, -1), LSWidth32, "vlsseg"#nf#"e32.v">;
    def VLSSEG#nf#E64_V : VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">;

    def VSSSEG#nf#E8_V : VStridedSegmentStore<!add(nf, -1), LSWidth8, "vssseg"#nf#"e8.v">;
    def VSSSEG#nf#E16_V : VStridedSegmentStore<!add(nf, -1), LSWidth16, "vssseg"#nf#"e16.v">;
    def VSSSEG#nf#E32_V : VStridedSegmentStore<!add(nf, -1), LSWidth32, "vssseg"#nf#"e32.v">;
    def VSSSEG#nf#E64_V : VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">;

    // Vector Indexed Instructions
    def VLUXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                             LSWidth8, "vluxseg"#nf#"ei8.v">;
    def VLUXSEG#nf#EI16_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                              LSWidth16, "vluxseg"#nf#"ei16.v">;
    def VLUXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                              LSWidth32, "vluxseg"#nf#"ei32.v">;
    def VLUXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                              LSWidth64, "vluxseg"#nf#"ei64.v">;

    def VLOXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                             LSWidth8, "vloxseg"#nf#"ei8.v">;
    def VLOXSEG#nf#EI16_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                              LSWidth16, "vloxseg"#nf#"ei16.v">;
    def VLOXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                              LSWidth32, "vloxseg"#nf#"ei32.v">;
    def VLOXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                              LSWidth64, "vloxseg"#nf#"ei64.v">;

    def VSUXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                             LSWidth8, "vsuxseg"#nf#"ei8.v">;
    def VSUXSEG#nf#EI16_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                              LSWidth16, "vsuxseg"#nf#"ei16.v">;
    def VSUXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                              LSWidth32, "vsuxseg"#nf#"ei32.v">;
    def VSUXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                              LSWidth64, "vsuxseg"#nf#"ei64.v">;

    def VSOXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                             LSWidth8, "vsoxseg"#nf#"ei8.v">;
    def VSOXSEG#nf#EI16_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                              LSWidth16, "vsoxseg"#nf#"ei16.v">;
    def VSOXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                              LSWidth32, "vsoxseg"#nf#"ei32.v">;
    def VSOXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                              LSWidth64, "vsoxseg"#nf#"ei64.v">;
  }
} // Predicates = [HasStdExtZvlsseg]

let Predicates = [HasStdExtZvamo, HasStdExtA] in {
  defm VAMOSWAPEI8 : VAMO<AMOOPVamoSwap, LSWidth8, "vamoswapei8.v">;
  defm VAMOSWAPEI16 : VAMO<AMOOPVamoSwap, LSWidth16, "vamoswapei16.v">;
  defm VAMOSWAPEI32 : VAMO<AMOOPVamoSwap, LSWidth32, "vamoswapei32.v">;

  defm VAMOADDEI8 : VAMO<AMOOPVamoAdd, LSWidth8, "vamoaddei8.v">;
  defm VAMOADDEI16 : VAMO<AMOOPVamoAdd, LSWidth16, "vamoaddei16.v">;
  defm VAMOADDEI32 : VAMO<AMOOPVamoAdd, LSWidth32, "vamoaddei32.v">;

  defm VAMOXOREI8 : VAMO<AMOOPVamoXor, LSWidth8, "vamoxorei8.v">;
  defm VAMOXOREI16 : VAMO<AMOOPVamoXor, LSWidth16, "vamoxorei16.v">;
  defm VAMOXOREI32 : VAMO<AMOOPVamoXor, LSWidth32, "vamoxorei32.v">;

  defm VAMOANDEI8 : VAMO<AMOOPVamoAnd, LSWidth8, "vamoandei8.v">;
  defm VAMOANDEI16 : VAMO<AMOOPVamoAnd, LSWidth16, "vamoandei16.v">;
  defm VAMOANDEI32 : VAMO<AMOOPVamoAnd, LSWidth32, "vamoandei32.v">;

  defm VAMOOREI8 : VAMO<AMOOPVamoOr, LSWidth8, "vamoorei8.v">;
  defm VAMOOREI16 : VAMO<AMOOPVamoOr, LSWidth16, "vamoorei16.v">;
  defm VAMOOREI32 : VAMO<AMOOPVamoOr, LSWidth32, "vamoorei32.v">;

  defm VAMOMINEI8 : VAMO<AMOOPVamoMin, LSWidth8, "vamominei8.v">;
  defm VAMOMINEI16 : VAMO<AMOOPVamoMin, LSWidth16, "vamominei16.v">;
  defm VAMOMINEI32 : VAMO<AMOOPVamoMin, LSWidth32, "vamominei32.v">;

  defm VAMOMAXEI8 : VAMO<AMOOPVamoMax, LSWidth8, "vamomaxei8.v">;
  defm VAMOMAXEI16 : VAMO<AMOOPVamoMax, LSWidth16, "vamomaxei16.v">;
  defm VAMOMAXEI32 : VAMO<AMOOPVamoMax, LSWidth32, "vamomaxei32.v">;

  defm VAMOMINUEI8 : VAMO<AMOOPVamoMinu, LSWidth8, "vamominuei8.v">;
  defm VAMOMINUEI16 : VAMO<AMOOPVamoMinu, LSWidth16, "vamominuei16.v">;
  defm VAMOMINUEI32 : VAMO<AMOOPVamoMinu, LSWidth32, "vamominuei32.v">;

  defm VAMOMAXUEI8 : VAMO<AMOOPVamoMaxu, LSWidth8, "vamomaxuei8.v">;
  defm VAMOMAXUEI16 : VAMO<AMOOPVamoMaxu, LSWidth16, "vamomaxuei16.v">;
  defm VAMOMAXUEI32 : VAMO<AMOOPVamoMaxu, LSWidth32, "vamomaxuei32.v">;
} // Predicates = [HasStdExtZvamo, HasStdExtA]

let Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64] in {
  defm VAMOSWAPEI64 : VAMO<AMOOPVamoSwap, LSWidth64, "vamoswapei64.v">;
  defm VAMOADDEI64 : VAMO<AMOOPVamoAdd, LSWidth64, "vamoaddei64.v">;
  defm VAMOXOREI64 : VAMO<AMOOPVamoXor, LSWidth64, "vamoxorei64.v">;
  defm VAMOANDEI64 : VAMO<AMOOPVamoAnd, LSWidth64, "vamoandei64.v">;
  defm VAMOOREI64 : VAMO<AMOOPVamoOr, LSWidth64, "vamoorei64.v">;
  defm VAMOMINEI64 : VAMO<AMOOPVamoMin, LSWidth64, "vamominei64.v">;
  defm VAMOMAXEI64 : VAMO<AMOOPVamoMax, LSWidth64, "vamomaxei64.v">;
  defm VAMOMINUEI64 : VAMO<AMOOPVamoMinu, LSWidth64, "vamominuei64.v">;
  defm VAMOMAXUEI64 : VAMO<AMOOPVamoMaxu, LSWidth64, "vamomaxuei64.v">;
} // Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64]

include "RISCVInstrInfoVPseudos.td"
