1//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// This file describes the RISC-V instructions from the standard 'V' Vector
10/// extension, version 0.8.
11/// This version is still experimental as the 'V' extension hasn't been
12/// ratified yet.
13///
14//===----------------------------------------------------------------------===//
15
16include "RISCVInstrFormatsV.td"
17
18//===----------------------------------------------------------------------===//
19// Operand and SDNode transformation definitions.
20//===----------------------------------------------------------------------===//
21
// The vtypei immediate operand of vsetvli. It is matched and rendered by the
// custom parseVTypeI/printVTypeI methods and decoded as an 11-bit unsigned
// immediate.
def VTypeIAsmOperand : AsmOperandClass {
  let Name = "VTypeI";
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
}

def VTypeIOp : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<11>";
}
33
// A plain vector register operand, matched with the common register parser
// and restricted to the VR register class.
def VRegAsmOperand : AsmOperandClass {
  let Name = "RVVRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isReg";
  let ParserMethod = "parseRegister";
}

def VRegOp : RegisterOperand<VR> {
  let ParserMatchClass = VRegAsmOperand;
  let PrintMethod = "printOperand";
}
45
// The optional trailing mask operand ($vm). Only v0 is accepted (isV0Reg);
// when the operand is omitted in assembly, defaultMaskRegOp supplies the
// default (unmasked) operand.
def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

// Mask operand over the single-register VMV0 class; custom print, encode and
// decode methods map it to/from the instruction's vm bit.
def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}
62
// A 5-bit signed immediate; the default scalar operand of the OPIVI
// instruction forms below.
def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  // A non-constant MCOperand is accepted only if it is a bare symbol
  // reference.
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}
74
def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addSImm5Plus1Operands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

// An immediate for which (Imm - 1) fits in 5 signed bits, i.e. the range
// [-15, 16]. Used by the vmslt/vmsge-style immediate aliases below, which
// map to the neighbouring comparison; addSImm5Plus1Operands renders the
// adjusted value.
def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
                                           [{return isInt<5>(Imm - 1);}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let PrintMethod = "printSImm5Plus1";
  // A non-constant MCOperand is accepted only if it is a bare symbol
  // reference.
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm - 1);
    return MCOp.isBareSymbolRef();
  }];
}
92
93//===----------------------------------------------------------------------===//
94// Instruction class templates
95//===----------------------------------------------------------------------===//
96
// Templates for the vector load instruction forms.
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// load vd, (rs1), vm
class VUnitStrideLoad<RISCVMOP mop, RISCVLSUMOP lumop, RISCVWidth width,
                        string opcodestr>
    : RVInstVLU<0b000, mop, lumop, width, (outs VRegOp:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// load vd, (rs1), rs2, vm
class VStridedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, mop, width, (outs VRegOp:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, mop, width, (outs VRegOp:$vd),
                (ins GPR:$rs1, VRegOp:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;

// vl<nf>r.v vd, (rs1)
// Whole-register load: always unmasked (vm = 1) and carries no mask operand.
class VWholeLoad<bits<3> nf, string opcodestr>
    : RVInstVLU<nf, MOPLDUnitStrideU, LUMOPUnitStrideWholeReg,
                LSWidthVSEW, (outs VRegOp:$vd), (ins GPR:$rs1),
                opcodestr, "$vd, (${rs1})"> {
  let vm = 1;
  // Clear any implicit register uses inherited from the parent class.
  let Uses = [];
}
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0
125
// Templates for the vector store instruction forms. Stores have no vector
// destination; the data source is $vs3.
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// store vd, vs3, (rs1), vm
class VUnitStrideStore<RISCVMOP mop, RISCVLSUMOP sumop, RISCVWidth width,
                         string opcodestr>
    : RVInstVSU<0b000, mop, sumop, width, (outs),
                (ins VRegOp:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// store vd, vs3, (rs1), rs2, vm
class VStridedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, mop, width, (outs),
                (ins VRegOp:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// store vd, vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, mop, width, (outs),
                (ins VRegOp:$vs3, GPR:$rs1, VRegOp:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;

// vs<nf>r.v vd, (rs1)
// Whole-register store: always unmasked (vm = 1) and carries no mask operand.
class VWholeStore<bits<3> nf, string opcodestr>
    : RVInstVSU<nf, MOPSTUnitStride, SUMOPUnitStrideWholeReg,
                LSWidthVSEW, (outs), (ins VRegOp:$vs3, GPR:$rs1),
                opcodestr, "$vs3, (${rs1})"> {
  let vm = 1;
  // Clear any implicit register uses inherited from the parent class.
  let Uses = [];
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1
155
// Templates for the vector ALU instruction forms (vector-vector,
// vector-scalar, vector-immediate; masked, carry-in, and unmasked variants).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VRegOp:$vd),
                (ins VRegOp:$vs2, VRegOp:$vs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $vs1$vm">;

// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
// vm = 0 clears the encoding's mask bit; v0 appears as an explicit data
// (carry) input rather than as a mask operand.
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VRegOp:$vd),
                (ins VRegOp:$vs2, VRegOp:$vs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $vs1, v0"> {
  let vm = 0;
}

// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VRegOp:$vd),
                (ins VRegOp:$vs1, VRegOp:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $vs1, $vs2$vm">;

// op vd, vs2, vs1 (unmasked; the vm encoding bit is forced to 1)
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VRegOp:$vd),
               (ins VRegOp:$vs2, VRegOp:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VRegOp:$vd),
                (ins VRegOp:$vs2, GPR:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VRegOp:$vd),
                (ins VRegOp:$vs2, GPR:$rs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VRegOp:$vd),
                (ins GPR:$rs1, VRegOp:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, rs1 (unmasked; the vm encoding bit is forced to 1)
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VRegOp:$vd),
               (ins VRegOp:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}

// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VRegOp:$vd),
                (ins VRegOp:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;

// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VRegOp:$vd),
                (ins VRegOp:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}

// op vd, vs2, imm (unmasked; the vm encoding bit is forced to 1)
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VRegOp:$vd),
                (ins VRegOp:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VRegOp:$vd),
                (ins VRegOp:$vs2, FPR32:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VRegOp:$vd),
                (ins FPR32:$rs1, VRegOp:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, vm (use vs1 as instruction encoding)
// The 5-bit vs1 field carries a sub-opcode instead of a register number.
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VRegOp:$vd),
               (ins VRegOp:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
253
254//===----------------------------------------------------------------------===//
255// Combination of instruction classes.
256// Use these multiclasses to define instructions more easily.
257//===----------------------------------------------------------------------===//
// OPIVV/OPIVX/OPIVI: <opcodestr>.<vw>v, .<vw>x and .<vw>i masked forms.
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>;
}

// As above, without the immediate form.
multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

// Reversed-operand (VALUr*) vector and scalar forms.
multiclass VALUr_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X : VALUrVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

// Scalar and immediate forms only (e.g. vrsub, vmsgt).
multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>;
}

// Single <opcodestr>.vs form.
multiclass VALU_IV_V<string opcodestr, bits<6> funct6> {
  def _VS  : VALUVV<funct6, OPIVV, opcodestr # ".vs">;
}

// Reversed-operand scalar form only.
multiclass VALUr_IV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}
286
// OPMVV/OPMVX variants of the multiclasses above.
multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">;
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

// Single <opcodestr>.vs form (used by the reduction instructions below).
multiclass VALU_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">;
}

// Unmasked mask-register operation: <opcodestr>.<vm>m.
multiclass VALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">;
}

// Scalar form only.
multiclass VALU_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

// Reversed-operand (multiply-add style) vector and scalar forms.
multiclass VALUr_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

// Reversed-operand scalar form only (e.g. vwmaccus).
multiclass VALUr_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

// Single-operand form; the vs1 field carries the sub-opcode.
multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>;
}
316
// Carry-in forms: <opcodestr>.vvm/.vxm/.vim with v0 as explicit input.
multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">;
  def IM : VALUmVI<funct6, opcodestr # ".vim">;
}

// As above, without the immediate form.
multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">;
}

// Unmasked forms: <opcodestr>.vv/.vx/.vi with the vm bit forced to 1.
multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">;
  def I : VALUVINoVm<funct6, opcodestr # ".vi", optype>;
}

// As above, without the immediate form.
multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">;
}
338
// Floating-point forms: <opcodestr>.<vw>v (OPFVV) and .<vw>f (OPFVF, scalar
// from FPR32).
multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

// Scalar (.vf) form only (e.g. vfrsub, vfrdiv).
multiclass VALU_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

// Reversed-operand (fused multiply-add style) forms.
multiclass VALUr_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

// Single <opcodestr>.vs form (used by the FP reduction instructions).
multiclass VALU_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">;
}

// Single-operand form; the vs1 field carries the sub-opcode.
multiclass VALU_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>;
}
360
361//===----------------------------------------------------------------------===//
362// Instructions
363//===----------------------------------------------------------------------===//
364
365let Predicates = [HasStdExtV] in {
// Configuration-setting instructions: these update the vector configuration
// state, hence hasSideEffects = 1.
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
// vsetvli rd, rs1, vtypei — new vtype taken from an immediate.
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">;

// vsetvl rd, rs1, rs2 — new vtype taken from a register.
def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
373
// Vector Unit-Stride Instructions
// Suffixes: b/h/w = byte/half/word element width, trailing 'u' = unsigned
// mop encoding, e = SEW-sized. The *FF variants use the fault-only-first
// lumop encoding.
def VLB_V : VUnitStrideLoad<MOPLDUnitStrideS, LUMOPUnitStride, LSWidthVByte, "vlb.v">;
def VLH_V : VUnitStrideLoad<MOPLDUnitStrideS, LUMOPUnitStride, LSWidthVHalf, "vlh.v">;
def VLW_V : VUnitStrideLoad<MOPLDUnitStrideS, LUMOPUnitStride, LSWidthVWord, "vlw.v">;

def VLBU_V : VUnitStrideLoad<MOPLDUnitStrideU, LUMOPUnitStride, LSWidthVByte, "vlbu.v">;
def VLHU_V : VUnitStrideLoad<MOPLDUnitStrideU, LUMOPUnitStride, LSWidthVHalf, "vlhu.v">;
def VLWU_V : VUnitStrideLoad<MOPLDUnitStrideU, LUMOPUnitStride, LSWidthVWord, "vlwu.v">;

def VLE_V : VUnitStrideLoad<MOPLDUnitStrideU, LUMOPUnitStride, LSWidthVSEW, "vle.v">;

def VLBFF_V : VUnitStrideLoad<MOPLDUnitStrideS, LUMOPUnitStrideFF, LSWidthVByte, "vlbff.v">;
def VLHFF_V : VUnitStrideLoad<MOPLDUnitStrideS, LUMOPUnitStrideFF, LSWidthVHalf, "vlhff.v">;
def VLWFF_V : VUnitStrideLoad<MOPLDUnitStrideS, LUMOPUnitStrideFF, LSWidthVWord, "vlwff.v">;

def VLBUFF_V : VUnitStrideLoad<MOPLDUnitStrideU, LUMOPUnitStrideFF, LSWidthVByte, "vlbuff.v">;
def VLHUFF_V : VUnitStrideLoad<MOPLDUnitStrideU, LUMOPUnitStrideFF, LSWidthVHalf, "vlhuff.v">;
def VLWUFF_V : VUnitStrideLoad<MOPLDUnitStrideU, LUMOPUnitStrideFF, LSWidthVWord, "vlwuff.v">;

def VLEFF_V : VUnitStrideLoad<MOPLDUnitStrideU, LUMOPUnitStrideFF, LSWidthVSEW, "vleff.v">;

// Stores have no signed/unsigned distinction.
def VSB_V : VUnitStrideStore<MOPSTUnitStride, SUMOPUnitStride, LSWidthVByte, "vsb.v">;
def VSH_V : VUnitStrideStore<MOPSTUnitStride, SUMOPUnitStride, LSWidthVHalf, "vsh.v">;
def VSW_V : VUnitStrideStore<MOPSTUnitStride, SUMOPUnitStride, LSWidthVWord, "vsw.v">;

def VSE_V : VUnitStrideStore<MOPSTUnitStride, SUMOPUnitStride, LSWidthVSEW, "vse.v">;

// Vector Strided Instructions
def VLSB_V : VStridedLoad<MOPLDStridedS, LSWidthVByte, "vlsb.v">;
def VLSH_V : VStridedLoad<MOPLDStridedS, LSWidthVHalf, "vlsh.v">;
def VLSW_V : VStridedLoad<MOPLDStridedS, LSWidthVWord, "vlsw.v">;

def VLSBU_V : VStridedLoad<MOPLDStridedU, LSWidthVByte, "vlsbu.v">;
def VLSHU_V : VStridedLoad<MOPLDStridedU, LSWidthVHalf, "vlshu.v">;
def VLSWU_V : VStridedLoad<MOPLDStridedU, LSWidthVWord, "vlswu.v">;

def VLSE_V : VStridedLoad<MOPLDStridedU, LSWidthVSEW, "vlse.v">;

def VSSB_V : VStridedStore<MOPSTStrided, LSWidthVByte, "vssb.v">;
def VSSH_V : VStridedStore<MOPSTStrided, LSWidthVHalf, "vssh.v">;
def VSSW_V : VStridedStore<MOPSTStrided, LSWidthVWord, "vssw.v">;
def VSSE_V : VStridedStore<MOPSTStrided, LSWidthVSEW, "vsse.v">;

// Vector Indexed Instructions
def VLXB_V : VIndexedLoad<MOPLDIndexedS, LSWidthVByte, "vlxb.v">;
def VLXH_V : VIndexedLoad<MOPLDIndexedS, LSWidthVHalf, "vlxh.v">;
def VLXW_V : VIndexedLoad<MOPLDIndexedS, LSWidthVWord, "vlxw.v">;

def VLXBU_V : VIndexedLoad<MOPLDIndexedU, LSWidthVByte, "vlxbu.v">;
def VLXHU_V : VIndexedLoad<MOPLDIndexedU, LSWidthVHalf, "vlxhu.v">;
def VLXWU_V : VIndexedLoad<MOPLDIndexedU, LSWidthVWord, "vlxwu.v">;

def VLXE_V : VIndexedLoad<MOPLDIndexedU, LSWidthVSEW, "vlxe.v">;

// Indexed stores come in ordered (vsx*) and unordered (vsux*) flavours.
def VSXB_V : VIndexedStore<MOPSTIndexedOrder, LSWidthVByte, "vsxb.v">;
def VSXH_V : VIndexedStore<MOPSTIndexedOrder, LSWidthVHalf, "vsxh.v">;
def VSXW_V : VIndexedStore<MOPSTIndexedOrder, LSWidthVWord, "vsxw.v">;
def VSXE_V : VIndexedStore<MOPSTIndexedOrder, LSWidthVSEW, "vsxe.v">;

def VSUXB_V : VIndexedStore<MOPSTIndexedUnOrd, LSWidthVByte, "vsuxb.v">;
def VSUXH_V : VIndexedStore<MOPSTIndexedUnOrd, LSWidthVHalf, "vsuxh.v">;
def VSUXW_V : VIndexedStore<MOPSTIndexedUnOrd, LSWidthVWord, "vsuxw.v">;
def VSUXE_V : VIndexedStore<MOPSTIndexedUnOrd, LSWidthVSEW, "vsuxe.v">;

// Whole-register (single-register, nf = 0) load/store.
def VL1R_V : VWholeLoad<0, "vl1r.v">;
def VS1R_V : VWholeStore<0, "vs1r.v">;
440
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// Widening conversion aliases: vwcvt{,u}.x.x.v vd, vs, vm is encoded as
// vwadd{,u}.vx vd, vs, x0, vm.
def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VRegOp:$vd, VRegOp:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VRegOp:$vd, VRegOp:$vs, X0, VMaskOp:$vm)>;
474
// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
// vmadc/vmsbc exist both with a carry/borrow input (.v*m, VALUm_*) and
// without one (.vv/.vx/.vi, VALUNoVm_*); the two defms produce differently
// suffixed records for the same funct6.
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;

// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

// vnot.v vd, vs, vm is encoded as vxor.vi vd, vs, -1, vm.
def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VRegOp:$vd, VRegOp:$vs, -1, VMaskOp:$vm)>;

// Vector Single-Width Bit Shift Instructions
// Shift amounts are unsigned 5-bit immediates (uimm5).
defm VSLL_V : VALU_IV_V_X_I<"vsll", 0b100101, uimm5>;
defm VSRL_V : VALU_IV_V_X_I<"vsrl", 0b101000, uimm5>;
defm VSRA_V : VALU_IV_V_X_I<"vsra", 0b101001, uimm5>;
495
// Vector Narrowing Integer Right Shift Instructions
// Refer to 11.3. Narrowing Vector Arithmetic Instructions
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
let Constraints = "@earlyclobber $vd", RVVConstraint = Narrow in {
defm VNSRL_W : VALU_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">;
defm VNSRA_W : VALU_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Narrow

// Vector Integer Comparison Instructions
defm VMSEQ_V : VALU_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VALU_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VALU_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VALU_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VALU_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VALU_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VALU_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VALU_IV_X_I<"vmsgt", 0b011111>;

// The .vv greater-than/greater-equal forms are aliases that swap the two
// vector operands of the corresponding less-than/less-equal instruction.
def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>;
// The .vi less-than/greater-equal forms are aliases of the adjacent
// comparison with the immediate adjusted by one (simm5_plus1).
def : InstAlias<"vmsltu.vi $vd, $va, $imm$vm",
                (VMSLEU_VI VRegOp:$vd, VRegOp:$va, simm5_plus1:$imm,
                 VMaskOp:$vm), 0>;
def : InstAlias<"vmslt.vi $vd, $va, $imm$vm",
                (VMSLE_VI VRegOp:$vd, VRegOp:$va, simm5_plus1:$imm,
                 VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vi $vd, $va, $imm$vm",
                (VMSGTU_VI VRegOp:$vd, VRegOp:$va, simm5_plus1:$imm,
                 VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vi $vd, $va, $imm$vm",
                (VMSGT_VI VRegOp:$vd, VRegOp:$va, simm5_plus1:$imm,
                 VMaskOp:$vm), 0>;
536
// Vector Integer Min/Max Instructions
defm VMINU_V : VALU_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VALU_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VALU_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VALU_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VALU_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VALU_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VALU_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VALU_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VALU_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VALU_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VALU_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VALU_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
defm VWMUL_V 
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VALU_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VALU_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VALU_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
// These use the reversed-operand (VALUr) forms: vd, vs1/rs1, vs2.
defm VMACC_V : VALUr_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VALUr_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VALUr_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VALUr_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VALUr_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VALUr_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VALUr_MV_V_X<"vwmaccsu", 0b111111>;
defm VWMACCUS_V : VALUr_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
575
// Vector Integer Merge Instructions
defm VMERGE_V : VALUm_IV_V_X_I<"vmerge", 0b010111>;

// Vector Integer Move Instructions
// The moves share the vmerge funct6 (0b010111) with vs2 fixed to 0 and the
// vm bit set (unmasked).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1 in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VRegOp:$vd),
                       (ins VRegOp:$vs1), "vmv.v.v", "$vd, $vs1">;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VRegOp:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VRegOp:$vd),
                       (ins simm5:$imm), "vmv.v.i", "$vd, $imm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1

// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VALU_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VALU_IV_V_X_I<"vssrl", 0b101010, uimm5>;
defm VSSRA_V : VALU_IV_V_X_I<"vssra", 0b101011, uimm5>;

// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = Narrow in {
defm VNCLIPU_W : VALU_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">;
defm VNCLIP_W : VALU_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Narrow
616
// Vector Single-Width Floating-Point Add/Subtract Instructions
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;

// Vector Widening Floating-Point Add/Subtract Instructions
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VFWADD_V : VALU_FV_V_F<"vfwadd", 0b110000>;
defm VFWSUB_V : VALU_FV_V_F<"vfwsub", 0b110010>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VFWADD_W : VALU_FV_V_F<"vfwadd", 0b110100, "w">;
defm VFWSUB_W : VALU_FV_V_F<"vfwsub", 0b110110, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// Vector Single-Width Floating-Point Multiply/Divide Instructions
defm VFMUL_V : VALU_FV_V_F<"vfmul", 0b100100>;
defm VFDIV_V : VALU_FV_V_F<"vfdiv", 0b100000>;
defm VFRDIV_V : VALU_FV_F<"vfrdiv", 0b100001>;

// Vector Widening Floating-Point Multiply
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMUL_V : VALU_FV_V_F<"vfwmul", 0b111000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
// Reversed-operand (VALUr) forms: vd, vs1/rs1, vs2.
defm VFMACC_V : VALUr_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VALUr_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VALUr_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VALUr_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VALUr_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VALUr_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VALUr_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VALUr_FV_V_F<"vfnmsub", 0b101011>;

// Vector Widening Floating-Point Fused Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMACC_V : VALUr_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VALUr_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VALUr_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VALUr_FV_V_F<"vfwnmsac", 0b111111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
665
666// Vector Floating-Point Square-Root Instruction
667defm VFSQRT_V : VALU_FV_VS2<"vfsqrt.v", 0b100011, 0b00000>;
668
669// Vector Floating-Point MIN/MAX Instructions
670defm VFMIN_V : VALU_FV_V_F<"vfmin", 0b000100>;
671defm VFMAX_V : VALU_FV_V_F<"vfmax", 0b000110>;
672
673// Vector Floating-Point Sign-Injection Instructions
674defm VFSGNJ_V : VALU_FV_V_F<"vfsgnj", 0b001000>;
675defm VFSGNJN_V : VALU_FV_V_F<"vfsgnjn", 0b001001>;
676defm VFSGNJX_V : VALU_FV_V_F<"vfsgnjx", 0b001010>;
677
678// Vector Floating-Point Compare Instructions
679defm VMFEQ_V : VALU_FV_V_F<"vmfeq", 0b011000>;
680defm VMFNE_V : VALU_FV_V_F<"vmfne", 0b011100>;
681defm VMFLT_V : VALU_FV_V_F<"vmflt", 0b011011>;
682defm VMFLE_V : VALU_FV_V_F<"vmfle", 0b011001>;
683defm VMFGT_V : VALU_FV_F<"vmfgt", 0b011101>;
684defm VMFGE_V : VALU_FV_F<"vmfge", 0b011111>;
685
686def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
687                (VMFLT_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>;
688def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
689                (VMFLE_VV VRegOp:$vd, VRegOp:$vb, VRegOp:$va, VMaskOp:$vm), 0>;
690
691// Vector Floating-Point Classify Instruction
692defm VFCLASS_V : VALU_FV_VS2<"vfclass.v", 0b100011, 0b10000>;
693
694let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
695// Vector Floating-Point Merge Instruction
696def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VRegOp:$vd),
697                           (ins VRegOp:$vs2, FPR32:$rs1, VMV0:$v0),
698                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0"> {
699  let vm = 0;
700}
701
702// Vector Floating-Point Move Instruction
703def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VRegOp:$vd),
704                       (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1"> {
705  let vs2 = 0;
706  let vm = 1;
707}
708} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
709
710// Single-Width Floating-Point/Integer Type-Convert Instructions
711defm VFCVT_XU_F_V : VALU_FV_VS2<"vfcvt.xu.f.v", 0b100010, 0b00000>;
712defm VFCVT_X_F_V : VALU_FV_VS2<"vfcvt.x.f.v", 0b100010, 0b00001>;
713defm VFCVT_F_XU_V : VALU_FV_VS2<"vfcvt.f.xu.v", 0b100010, 0b00010>;
714defm VFCVT_F_X_V : VALU_FV_VS2<"vfcvt.f.x.v", 0b100010, 0b00011>;
715
// Widening Floating-Point/Integer Type-Convert Instructions
// Widening converts produce a destination group wider than the source, so
// vd is earlyclobber and the WidenCvt constraint enforces the overlap rules.
// vs1-field selectors 0b01000..0b01100 under the shared funct6 0b100010.
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in {
defm VFWCVT_XU_F_V : VALU_FV_VS2<"vfwcvt.xu.f.v", 0b100010, 0b01000>;
defm VFWCVT_X_F_V : VALU_FV_VS2<"vfwcvt.x.f.v", 0b100010, 0b01001>;
defm VFWCVT_F_XU_V : VALU_FV_VS2<"vfwcvt.f.xu.v", 0b100010, 0b01010>;
defm VFWCVT_F_X_V : VALU_FV_VS2<"vfwcvt.f.x.v", 0b100010, 0b01011>;
defm VFWCVT_F_F_V : VALU_FV_VS2<"vfwcvt.f.f.v", 0b100010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt
724
// Narrowing Floating-Point/Integer Type-Convert Instructions
// The ".w" suffix marks the wide (2*SEW) source operand; the Narrow
// constraint plus earlyclobber vd enforce the narrowing overlap rules.
// vs1-field selectors 0b10000..0b10101 under the shared funct6 0b100010.
let Constraints = "@earlyclobber $vd", RVVConstraint = Narrow in {
defm VFNCVT_XU_F_W : VALU_FV_VS2<"vfncvt.xu.f.w", 0b100010, 0b10000>;
defm VFNCVT_X_F_W : VALU_FV_VS2<"vfncvt.x.f.w", 0b100010, 0b10001>;
defm VFNCVT_F_XU_W : VALU_FV_VS2<"vfncvt.f.xu.w", 0b100010, 0b10010>;
defm VFNCVT_F_X_W : VALU_FV_VS2<"vfncvt.f.x.w", 0b100010, 0b10011>;
defm VFNCVT_F_F_W : VALU_FV_VS2<"vfncvt.f.f.w", 0b100010, 0b10100>;
// "rod" = round-towards-odd variant of the float-to-float narrowing convert.
defm VFNCVT_ROD_F_F_W : VALU_FV_VS2<"vfncvt.rod.f.f.w", 0b100010, 0b10101>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Narrow
734
// Vector Single-Width Integer Reduction Instructions
// Vector-vector only (VALU_MV_V); funct6 encodings 0b000000..0b000111
// select the reduction operation.
defm VREDSUM : VALU_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VALU_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX : VALU_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VALU_MV_V<"vredminu", 0b000100>;
defm VREDMIN : VALU_MV_V<"vredmin", 0b000101>;
defm VREDAND : VALU_MV_V<"vredand", 0b000001>;
defm VREDOR : VALU_MV_V<"vredor", 0b000010>;
defm VREDXOR : VALU_MV_V<"vredxor", 0b000011>;
744
// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd" in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VWREDSUMU : VALU_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VALU_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd"
754
// Vector Single-Width Floating-Point Reduction Instructions
// vfredosum is the ordered-sum variant; vfredsum may reduce in any order.
defm VFREDOSUM : VALU_FV_V<"vfredosum", 0b000011>;
defm VFREDSUM : VALU_FV_V<"vfredsum", 0b000001>;
defm VFREDMAX : VALU_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VALU_FV_V<"vfredmin", 0b000101>;
760
// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd" in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VFWREDOSUM : VALU_FV_V<"vfwredosum", 0b110011>;
defm VFWREDSUM : VALU_FV_V<"vfwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd"
770
// Vector Mask-Register Logical Instructions
// The trailing "m" argument gives the operand suffix, producing the
// canonical "*.mm" spellings (e.g. VMAND_MM used by the aliases below).
defm VMAND_M : VALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDNOT_M : VALU_MV_Mask<"vmandnot", 0b011000, "m">;
defm VMXOR_M : VALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORNOT_M : VALU_MV_Mask<"vmornot", 0b011100, "m">;
defm VMXNOR_M : VALU_MV_Mask<"vmxnor", 0b011111, "m">;

// Assembler pseudo-instructions built from the mask logical ops:
//   vmcpy.m vd, vs  -> vmand.mm  vd, vs, vs   (copy mask)
//   vmclr.m vd      -> vmxor.mm  vd, vd, vd   (all zeros)
//   vmset.m vd      -> vmxnor.mm vd, vd, vd   (all ones)
//   vmnot.m vd, vs  -> vmnand.mm vd, vs, vs   (complement mask)
def : InstAlias<"vmcpy.m $vd, $vs",
                (VMAND_MM VRegOp:$vd, VRegOp:$vs, VRegOp:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VRegOp:$vd, VRegOp:$vd, VRegOp:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VRegOp:$vd, VRegOp:$vd, VRegOp:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VRegOp:$vd, VRegOp:$vs, VRegOp:$vs)>;
789
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// Vector mask population count vpopc
// Writes a scalar GPR result; funct6 = 0b010000 with 0b10000 in the vs1
// field. The optional mask operand prints as the "$vm" suffix.
def VPOPC_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                        (ins VRegOp:$vs2, VMaskOp:$vm),
                        "vpopc.m", "$vd, $vs2$vm">;

// vfirst find-first-set mask bit
// Same encoding family as vpopc but vs1-field value 0b10001; also writes
// its result to a scalar GPR.
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                        (ins VRegOp:$vs2, VMaskOp:$vm),
                        "vfirst.m", "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
801
// Mask set-bit instructions: all share funct6 = 0b010100 and are
// distinguished by the value encoded in the vs1 field.

// vmsbf.m set-before-first mask bit
defm VMSBF_M : VALU_MV_VS2<"vmsbf.m", 0b010100, 0b00001>;

// vmsif.m set-including-first mask bit
defm VMSIF_M : VALU_MV_VS2<"vmsif.m", 0b010100, 0b00011>;

// vmsof.m set-only-first mask bit
defm VMSOF_M : VALU_MV_VS2<"vmsof.m", 0b010100, 0b00010>;

// Vector Iota Instruction
// viota.m may not overlap its source/mask, hence earlyclobber vd and the
// dedicated Iota overlap constraint.
let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {
defm VIOTA_M : VALU_MV_VS2<"viota.m", 0b010100, 0b10000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota
815
// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// vid.v has no vector source: vs2 is hardwired to 0 and the only input is
// the optional mask operand.
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VRegOp:$vd),
                      (ins VMaskOp:$vm), "vid.v", "$vd$vm"> {
  let vs2 = 0;
}

// Integer Scalar Move Instructions
// Scalar moves are always unmasked (vm = 1). vmv.x.s reads element 0 into
// a GPR; vmv.s.x writes a GPR into element 0 (RVInstV2 places the scalar
// in the rs1 field).
let vm = 1 in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VRegOp:$vs2), "vmv.x.s", "$vd, $vs2">;
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VRegOp:$vd),
                      (ins GPR:$rs1), "vmv.s.x", "$vd, $rs1">;

}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
832
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1 in {
// Floating-Point Scalar Move Instructions
// FP counterparts of vmv.x.s/vmv.s.x: same funct6 (0b010000) under the
// OPFVV/OPFVF funct3 encodings, always unmasked (vm = 1 from the let).
// NOTE(review): the scalar side is FPR32 here — wider FP element types are
// presumably handled elsewhere; confirm against the register-class setup.
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                      (ins VRegOp:$vs2), "vfmv.f.s", "$vd, $vs2">;
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VRegOp:$vd),
                      (ins FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1
841
// Vector Slide Instructions
// Only the "up" variants need earlyclobber/SlideUp: sliding up reads lower
// source elements after writing higher destination elements, so vd must not
// overlap the source. Slide-down has no such hazard.
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VALU_IV_X_I<"vslideup", 0b001110, uimm5>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VALU_IV_X_I<"vslidedown", 0b001111, uimm5>;

// vslide1up/vslide1down take the inserted scalar from a GPR (.vx form only).
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDE1UP_V : VALU_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDE1DOWN_V : VALU_MV_X<"vslide1down", 0b001111>;
852
// Vector Register Gather Instruction
// .vv, .vx and .vi (uimm5 index) forms; vd must not overlap either source,
// enforced by earlyclobber plus the Vrgather constraint.
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VALU_IV_V_X_I<"vrgather", 0b001100, uimm5>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather

// Vector Compress Instruction
// Reuses the mask-operand format (VALU_MV_Mask) without a suffix argument,
// giving the plain "vcompress.vm" spelling from the multiclass.
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VALU_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress
862
// Whole Vector Register Move Instructions
// Generates vmv1r.v, vmv2r.v, vmv4r.v and vmv8r.v. The register count is
// encoded as nf-1 (!add(nf, -1)) in the second field, under funct6 0b100111
// with the OPIVI funct3.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
foreach nf = [1, 2, 4, 8] in {
  def VMV#nf#R_V  : RVInstV<0b100111, !add(nf, -1), OPIVI, (outs VRegOp:$vd),
                            (ins VRegOp:$vs2), "vmv" # nf # "r.v",
                            "$vd, $vs2"> {
    // Clear any implicit uses inherited from the instruction base class —
    // presumably vtype/vl, since whole-register moves are always unmasked
    // (vm = 1); confirm against the base-class definition.
    let Uses = [];
    let vm = 1;
  }
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
873} // Predicates = [HasStdExtV]
874