//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file describes the RISC-V instructions from the standard 'V' Vector
/// extension, version 0.9.
/// This version is still experimental as the 'V' extension hasn't been
/// ratified yet.
///
//===----------------------------------------------------------------------===//

include "RISCVInstrFormatsV.td"

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

def VTypeIAsmOperand : AsmOperandClass {
  let Name = "VTypeI";
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
}

def VTypeIOp : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<11>";
}

def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}

def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
                                           [{return isInt<5>(Imm - 1);}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm - 1);
    return MCOp.isBareSymbolRef();
  }];
}

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// load vd, (rs1), vm
class VUnitStrideLoad<RISCVLSUMOP lumop, RISCVWidth width,
                      string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, lumop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;

// vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VR:$vd), (ins GPR:$rs1),
                opcodestr, "$vd, (${rs1})"> {
  let vm = 1;
  let Uses = [];
  let RVVConstraint = NoConstraint;
}

// segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVLSUMOP lumop,
                             RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, lumop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0

let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// store vd, vs3, (rs1), vm
class VUnitStrideStore<RISCVLSUMOP sumop, RISCVWidth width,
                       string opcodestr>
    : RVInstVSU<0b000, width.Value{3}, sumop, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// store vd, vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// store vd, vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;

// vs<nf>r.v vd, (rs1)
class VWholeStore<bits<3> nf, string opcodestr>
    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
                0b000, (outs), (ins VR:$vs3, GPR:$rs1),
                opcodestr, "$vs3, (${rs1})"> {
  let vm = 1;
  let Uses = [];
}

// segment store vd, vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// segment store vd, vs3, (rs1), rs2, vm
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// segment store vd, vs3, (rs1), vs2, vm
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $vs1$vm">;

// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, VR:$vs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $vs1, v0"> {
  let vm = 0;
}

// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
                (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $vs1, $vs2$vm">;

// op vd, vs2, vs1
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, GPR:$rs1, VMV0:$v0),
                opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, rs1
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}

// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;

// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}

// op vd, vs2, imm
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
                (ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, vm (the vs1 field is used as extra opcode bits)
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
               (ins VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in {
// vamo vd, (rs1), vs2, vd, vm
class VAMOWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
    : RVInstVAMO<amoop, width.Value{2-0}, (outs VR:$vd_wd),
            (ins GPR:$rs1, VR:$vs2, VR:$vd, VMaskOp:$vm),
            opcodestr, "$vd_wd, (${rs1}), $vs2, $vd$vm"> {
    let Constraints = "$vd_wd = $vd";
    let wd = 1;
    bits<5> vd;
    let Inst{11-7} = vd;
}

// vamo x0, (rs1), vs2, vs3, vm
class VAMONoWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
    : RVInstVAMO<amoop, width.Value{2-0}, (outs),
            (ins GPR:$rs1, VR:$vs2, VR:$vs3, VMaskOp:$vm),
            opcodestr, "x0, (${rs1}), $vs2, $vs3$vm"> {
    bits<5> vs3;
    let Inst{11-7} = vs3;
}

} // hasSideEffects = 0, mayLoad = 1, mayStore = 1

//===----------------------------------------------------------------------===//
// Combination of instruction classes.
// Use these multiclasses to define instructions more easily.
//===----------------------------------------------------------------------===//
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6,
                         Operand optype = simm5, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>;
}

multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V  : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

multiclass VALUr_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
  def X : VALUrVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6,
                       Operand optype = simm5, string vw = "v"> {
  def X  : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
  def I  : VALUVI<funct6, opcodestr # "." # vw # "i", optype>;
}

multiclass VALU_IV_V<string opcodestr, bits<6> funct6> {
  def _VS  : VALUVV<funct6, OPIVV, opcodestr # ".vs">;
}

multiclass VALUr_IV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">;
}

multiclass VALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">;
}

multiclass VALU_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X  : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALUr_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALUr_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>;
}

multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">;
  def IM : VALUmVI<funct6, opcodestr # ".vim">;
}

multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">;
}

multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6,
                             Operand optype = simm5> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">;
  def I : VALUVINoVm<funct6, opcodestr # ".vi", optype>;
}

multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">;
}

multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALU_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALUr_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALU_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">;
}

multiclass VALU_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>;
}

multiclass VAMO<RISCVAMOOP amoop, RISCVWidth width, string opcodestr> {
  def _WD : VAMOWd<amoop, width, opcodestr>;
  def _UNWD : VAMONoWd<amoop, width, opcodestr>;
}

multiclass VWholeLoad<bits<3> nf, string opcodestr> {
  def E8_V : VWholeLoad<nf, LSWidth8, opcodestr # "e8.v">;
  def E16_V : VWholeLoad<nf, LSWidth16, opcodestr # "e16.v">;
  def E32_V : VWholeLoad<nf, LSWidth32, opcodestr # "e32.v">;
  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v">;
  def E128_V : VWholeLoad<nf, LSWidth128, opcodestr # "e128.v">;
  def E256_V : VWholeLoad<nf, LSWidth256, opcodestr # "e256.v">;
  def E512_V : VWholeLoad<nf, LSWidth512, opcodestr # "e512.v">;
  def E1024_V : VWholeLoad<nf, LSWidth1024, opcodestr # "e1024.v">;
}

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtV] in {
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">;

def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
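
// For example (illustrative only; vtype field syntax follows the v0.9 spec):
//   vsetvli t0, a0, e32,m2   # request a0 elements at SEW=32, LMUL=2;
//                            # the granted vl is returned in t0
//   vsetvl  t0, a0, a1       # same, with the vtype value taken from a1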

// Vector Unit-Stride Instructions
def VLE8_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth8, "vle8.v">;
def VLE16_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth16, "vle16.v">;
def VLE32_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth32, "vle32.v">;
def VLE64_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth64, "vle64.v">;
def VLE128_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth128, "vle128.v">;
def VLE256_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth256, "vle256.v">;
def VLE512_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth512, "vle512.v">;
def VLE1024_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth1024, "vle1024.v">;

def VLE8FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth8, "vle8ff.v">;
def VLE16FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth16, "vle16ff.v">;
def VLE32FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth32, "vle32ff.v">;
def VLE64FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth64, "vle64ff.v">;
def VLE128FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth128, "vle128ff.v">;
def VLE256FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth256, "vle256ff.v">;
def VLE512FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth512, "vle512ff.v">;
def VLE1024FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth1024, "vle1024ff.v">;

def VSE8_V : VUnitStrideStore<SUMOPUnitStride, LSWidth8, "vse8.v">;
def VSE16_V : VUnitStrideStore<SUMOPUnitStride, LSWidth16, "vse16.v">;
def VSE32_V : VUnitStrideStore<SUMOPUnitStride, LSWidth32, "vse32.v">;
def VSE64_V : VUnitStrideStore<SUMOPUnitStride, LSWidth64, "vse64.v">;
def VSE128_V : VUnitStrideStore<SUMOPUnitStride, LSWidth128, "vse128.v">;
def VSE256_V : VUnitStrideStore<SUMOPUnitStride, LSWidth256, "vse256.v">;
def VSE512_V : VUnitStrideStore<SUMOPUnitStride, LSWidth512, "vse512.v">;
def VSE1024_V : VUnitStrideStore<SUMOPUnitStride, LSWidth1024, "vse1024.v">;

// Vector Strided Instructions
def VLSE8_V : VStridedLoad<LSWidth8, "vlse8.v">;
def VLSE16_V : VStridedLoad<LSWidth16, "vlse16.v">;
def VLSE32_V : VStridedLoad<LSWidth32, "vlse32.v">;
def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">;
def VLSE128_V : VStridedLoad<LSWidth128, "vlse128.v">;
def VLSE256_V : VStridedLoad<LSWidth256, "vlse256.v">;
def VLSE512_V : VStridedLoad<LSWidth512, "vlse512.v">;
def VLSE1024_V : VStridedLoad<LSWidth1024, "vlse1024.v">;

def VSSE8_V : VStridedStore<LSWidth8, "vsse8.v">;
def VSSE16_V : VStridedStore<LSWidth16, "vsse16.v">;
def VSSE32_V : VStridedStore<LSWidth32, "vsse32.v">;
def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">;
def VSSE128_V : VStridedStore<LSWidth128, "vsse128.v">;
def VSSE256_V : VStridedStore<LSWidth256, "vsse256.v">;
def VSSE512_V : VStridedStore<LSWidth512, "vsse512.v">;
def VSSE1024_V : VStridedStore<LSWidth1024, "vsse1024.v">;

// Vector Indexed Instructions
def VLUXEI8_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth8, "vluxei8.v">;
def VLUXEI16_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth16, "vluxei16.v">;
def VLUXEI32_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth32, "vluxei32.v">;
def VLUXEI64_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth64, "vluxei64.v">;

def VLOXEI8_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth8, "vloxei8.v">;
def VLOXEI16_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth16, "vloxei16.v">;
def VLOXEI32_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth32, "vloxei32.v">;
def VLOXEI64_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth64, "vloxei64.v">;

def VSUXEI8_V : VIndexedStore<MOPSTIndexedUnord, LSWidth8, "vsuxei8.v">;
def VSUXEI16_V : VIndexedStore<MOPSTIndexedUnord, LSWidth16, "vsuxei16.v">;
def VSUXEI32_V : VIndexedStore<MOPSTIndexedUnord, LSWidth32, "vsuxei32.v">;
def VSUXEI64_V : VIndexedStore<MOPSTIndexedUnord, LSWidth64, "vsuxei64.v">;

def VSOXEI8_V : VIndexedStore<MOPSTIndexedOrder, LSWidth8, "vsoxei8.v">;
def VSOXEI16_V : VIndexedStore<MOPSTIndexedOrder, LSWidth16, "vsoxei16.v">;
def VSOXEI32_V : VIndexedStore<MOPSTIndexedOrder, LSWidth32, "vsoxei32.v">;
def VSOXEI64_V : VIndexedStore<MOPSTIndexedOrder, LSWidth64, "vsoxei64.v">;
// The nf field encodes the number of whole registers minus one, so it must
// be 0/1/3/7 here (8 would not even fit in bits<3>).
defm VL1R : VWholeLoad<0, "vl1r">;
defm VL2R : VWholeLoad<1, "vl2r">;
defm VL4R : VWholeLoad<3, "vl4r">;
defm VL8R : VWholeLoad<7, "vl8r">;
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VR:$vd, GPR:$rs1)>;

def VS1R_V : VWholeStore<0, "vs1r.v">;
def VS2R_V : VWholeStore<1, "vs2r.v">;
def VS4R_V : VWholeStore<3, "vs4r.v">;
def VS8R_V : VWholeStore<7, "vs8r.v">;

// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

def : InstAlias<"vneg.v $vd, $vs$vm",
                (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
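// For example, with LMUL=1 "vwadd.vv v4, v4, v5" is reserved: the
// destination group v4-v5 (at 2*SEW) overlaps the source v4 (at SEW).
// (Illustrative example, not quoted from the spec text.)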
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
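// For example, the spec allows "vwaddu.wv v4, v4, v8" (the destination may
// overlap the already-wide first operand), but the blanket earlyclobber
// below keeps the register allocator from choosing that assignment.
// (Illustrative example.)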
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Integer Extension
defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;

// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;

// Vector Single-Width Bit Shift Instructions
defm VSLL_V : VALU_IV_V_X_I<"vsll", 0b100101, uimm5>;
defm VSRL_V : VALU_IV_V_X_I<"vsrl", 0b101000, uimm5>;
defm VSRA_V : VALU_IV_V_X_I<"vsra", 0b101001, uimm5>;

// Vector Narrowing Integer Right Shift Instructions
// Refer to 11.3. Narrowing Vector Arithmetic Instructions
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
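// For example, with LMUL=1 "vnsrl.wv v4, v4, v6" is reserved because the
// narrow destination v4 overlaps the wide source group v4-v5 specified by
// vs2. (Illustrative example.)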
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VALU_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">;
defm VNSRA_W : VALU_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"

def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Integer Comparison Instructions
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VALU_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VALU_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VALU_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VALU_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VALU_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VALU_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VALU_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VALU_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint

def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons we need to special-case an immediate of 0 to
// keep the always-true/always-false semantics, which would be inverted if
// we simply decremented the immediate as we do for the signed forms. To
// match the GNU assembler we use vmseq/vmsne.vv with the same register for
// both operands, which we can't express with an InstAlias.
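// The resulting expansions look roughly like this (a sketch, following the
// V spec's recommended pseudoinstruction sequences):
//   vmsgeu.vi vd, va, 0, vm  ->  vmseq.vv  vd, va, va, vm    (always true)
//   vmsgeu.vi vd, va, i, vm  ->  vmsgtu.vi vd, va, i-1, vm   (i != 0)
//   vmsltu.vi vd, va, 0, vm  ->  vmsne.vv  vd, va, va, vm    (always false)
//   vmsltu.vi vd, va, i, vm  ->  vmsleu.vi vd, va, i-1, vm   (i != 0)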
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle signed with pseudos as well for more consistency in the
// implementation.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
}

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGEU_VX_M_T : Pseudo<(outs VMV0:$vd, VR:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
def PseudoVMSGE_VX_M_T : Pseudo<(outs VMV0:$vd, VR:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
}
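
// The three shapes above correspond to the expansion sequences suggested by
// the V spec (a sketch):
//   vmsge{u}.vx vd, va, x            ->  vmslt{u}.vx vd, va, x
//                                        vmnot.m     vd, vd
//   vmsge{u}.vx vd, va, x, v0.t      ->  vmslt{u}.vx vd, va, x, v0.t
//                                        vmxor.mm    vd, vd, v0    (vd != v0)
//   vmsge{u}.vx v0, va, x, v0.t, vt  ->  vmslt{u}.vx vt, va, x
//                                        vmandnot.mm v0, v0, vt    (vd == v0)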

// Vector Integer Min/Max Instructions
defm VMINU_V : VALU_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VALU_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VALU_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VALU_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VALU_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VALU_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VALU_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VALU_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VALU_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VALU_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VALU_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VALU_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VALU_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VALU_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VALU_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VALUr_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VALUr_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VALUr_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VALUr_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VALUr_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VALUr_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VALUr_MV_V_X<"vwmaccsu", 0b111111>;
defm VWMACCUS_V : VALUr_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Integer Merge Instructions
defm VMERGE_V : VALUm_IV_V_X_I<"vmerge", 0b010111>;

// Vector Integer Move Instructions
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                       (ins simm5:$imm), "vmv.v.i", "$vd, $imm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VALU_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VALU_IV_V_X_I<"vssrl", 0b101010, uimm5>;
defm VSSRA_V : VALU_IV_V_X_I<"vssra", 0b101011, uimm5>;

// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VALU_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">;
defm VNCLIP_W : VALU_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
// Vector Single-Width Floating-Point Add/Subtract Instructions
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;

// Vector Widening Floating-Point Add/Subtract Instructions
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VFWADD_V : VALU_FV_V_F<"vfwadd", 0b110000>;
defm VFWSUB_V : VALU_FV_V_F<"vfwsub", 0b110010>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VFWADD_W : VALU_FV_V_F<"vfwadd", 0b110100, "w">;
defm VFWSUB_W : VALU_FV_V_F<"vfwsub", 0b110110, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// Vector Single-Width Floating-Point Multiply/Divide Instructions
defm VFMUL_V : VALU_FV_V_F<"vfmul", 0b100100>;
defm VFDIV_V : VALU_FV_V_F<"vfdiv", 0b100000>;
defm VFRDIV_V : VALU_FV_F<"vfrdiv", 0b100001>;

// Vector Widening Floating-Point Multiply
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMUL_V : VALU_FV_V_F<"vfwmul", 0b111000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
defm VFMACC_V : VALUr_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VALUr_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VALUr_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VALUr_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VALUr_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VALUr_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VALUr_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VALUr_FV_V_F<"vfnmsub", 0b101011>;

// Vector Widening Floating-Point Fused Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMACC_V : VALUr_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VALUr_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VALUr_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VALUr_FV_V_F<"vfwnmsac", 0b111111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Floating-Point Square-Root Instruction
defm VFSQRT_V : VALU_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
defm VFRSQRTE7_V : VALU_FV_VS2<"vfrsqrte7.v", 0b010011, 0b00100>;
defm VFRECE7_V : VALU_FV_VS2<"vfrece7.v", 0b010011, 0b00101>;

// Vector Floating-Point MIN/MAX Instructions
defm VFMIN_V : VALU_FV_V_F<"vfmin", 0b000100>;
defm VFMAX_V : VALU_FV_V_F<"vfmax", 0b000110>;

// Vector Floating-Point Sign-Injection Instructions
defm VFSGNJ_V : VALU_FV_V_F<"vfsgnj", 0b001000>;
defm VFSGNJN_V : VALU_FV_V_F<"vfsgnjn", 0b001001>;
defm VFSGNJX_V : VALU_FV_V_F<"vfsgnjx", 0b001010>;

def : InstAlias<"vfneg.v $vd, $vs$vm",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;

// Vector Floating-Point Compare Instructions
let RVVConstraint = NoConstraint in {
defm VMFEQ_V : VALU_FV_V_F<"vmfeq", 0b011000>;
defm VMFNE_V : VALU_FV_V_F<"vmfne", 0b011100>;
defm VMFLT_V : VALU_FV_V_F<"vmflt", 0b011011>;
defm VMFLE_V : VALU_FV_V_F<"vmfle", 0b011001>;
defm VMFGT_V : VALU_FV_F<"vmfgt", 0b011101>;
defm VMFGE_V : VALU_FV_F<"vmfge", 0b011111>;
} // RVVConstraint = NoConstraint

def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
                (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
                (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

// Vector Floating-Point Classify Instruction
defm VFCLASS_V : VALU_FV_VS2<"vfclass.v", 0b010011, 0b10000>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// Vector Floating-Point Merge Instruction
def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                           (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// Vector Floating-Point Move Instruction
let RVVConstraint = NoConstraint in
def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                       (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1"> {
  let vs2 = 0;
  let vm = 1;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Single-Width Floating-Point/Integer Type-Convert Instructions
defm VFCVT_XU_F_V : VALU_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
defm VFCVT_X_F_V : VALU_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
defm VFCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
defm VFCVT_RTZ_X_F_V : VALU_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
defm VFCVT_F_XU_V : VALU_FV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
defm VFCVT_F_X_V : VALU_FV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;

// Widening Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in {
defm VFWCVT_XU_F_V : VALU_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
defm VFWCVT_X_F_V : VALU_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
defm VFWCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
defm VFWCVT_RTZ_X_F_V : VALU_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
defm VFWCVT_F_XU_V : VALU_FV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
defm VFWCVT_F_X_V : VALU_FV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
defm VFWCVT_F_F_V : VALU_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt

// Narrowing Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd" in {
defm VFNCVT_XU_F_W : VALU_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
defm VFNCVT_X_F_W : VALU_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
defm VFNCVT_RTZ_XU_F_W : VALU_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
defm VFNCVT_RTZ_X_F_W : VALU_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
defm VFNCVT_F_XU_W : VALU_FV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
defm VFNCVT_F_X_W : VALU_FV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VALU_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
defm VFNCVT_ROD_F_F_W : VALU_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Single-Width Integer Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VREDSUM : VALU_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VALU_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX : VALU_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VALU_MV_V<"vredminu", 0b000100>;
defm VREDMIN : VALU_MV_V<"vredmin", 0b000101>;
defm VREDAND : VALU_MV_V<"vredand", 0b000001>;
defm VREDOR : VALU_MV_V<"vredor", 0b000010>;
defm VREDXOR : VALU_MV_V<"vredxor", 0b000011>;
} // RVVConstraint = NoConstraint

// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VWREDSUMU : VALU_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VALU_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
// Vector Single-Width Floating-Point Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VFREDOSUM : VALU_FV_V<"vfredosum", 0b000011>;
defm VFREDSUM : VALU_FV_V<"vfredsum", 0b000001>;
defm VFREDMAX : VALU_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VALU_FV_V<"vfredmin", 0b000101>;
} // RVVConstraint = NoConstraint

// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VFWREDOSUM : VALU_FV_V<"vfwredosum", 0b110011>;
defm VFWREDSUM : VALU_FV_V<"vfwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Mask-Register Logical Instructions
let RVVConstraint = NoConstraint in {
defm VMAND_M : VALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDNOT_M : VALU_MV_Mask<"vmandnot", 0b011000, "m">;
defm VMXOR_M : VALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORNOT_M : VALU_MV_Mask<"vmornot", 0b011100, "m">;
defm VMXNOR_M : VALU_MV_Mask<"vmxnor", 0b011111, "m">;
}

def : InstAlias<"vmmv.m $vd, $vs",
                (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
// Vector mask population count vpopc
def VPOPC_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                        (ins VR:$vs2, VMaskOp:$vm),
                        "vpopc.m", "$vd, $vs2$vm">;

// vfirst find-first-set mask bit
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                        (ins VR:$vs2, VMaskOp:$vm),
                        "vfirst.m", "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {
// vmsbf.m set-before-first mask bit
defm VMSBF_M : VALU_MV_VS2<"vmsbf.m", 0b010100, 0b00001>;
// vmsif.m set-including-first mask bit
defm VMSIF_M : VALU_MV_VS2<"vmsif.m", 0b010100, 0b00011>;
// vmsof.m set-only-first mask bit
defm VMSOF_M : VALU_MV_VS2<"vmsof.m", 0b010100, 0b00010>;
// Vector Iota Instruction
defm VIOTA_M : VALU_MV_VS2<"viota.m", 0b010100, 0b10000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota

// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
                      (ins VMaskOp:$vm), "vid.v", "$vd$vm"> {
  let vs2 = 0;
}

// Integer Scalar Move Instructions
let vm = 1, RVVConstraint = NoConstraint in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">;
let Constraints = "$vd = $vd_wb" in
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
                      (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// Floating-Point Scalar Move Instructions
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                      (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">;
let Constraints = "$vd = $vd_wb" in
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
                      (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Slide Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VALU_IV_X_I<"vslideup", 0b001110, uimm5>;
defm VSLIDE1UP_V : VALU_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VALU_IV_X_I<"vslidedown", 0b001111, uimm5>;
defm VSLIDE1DOWN_V : VALU_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VALU_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VALU_FV_F<"vfslide1down", 0b001111>;
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Register Gather Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VALU_IV_V_X_I<"vrgather", 0b001100, uimm5>;
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather

// Vector Compress Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VALU_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
foreach nf = [1, 2, 4, 8] in {
  def VMV#nf#R_V : RVInstV<0b100111, !add(nf, -1), OPIVI, (outs VR:$vd),
                           (ins VR:$vs2), "vmv" # nf # "r.v",
                           "$vd, $vs2"> {
    let Uses = [];
    let vm = 1;
  }
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtZvlsseg] in {
  foreach nf=2-8 in {
    def VLSEG#nf#E8_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth8, "vlseg"#nf#"e8.v">;
    def VLSEG#nf#E16_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth16, "vlseg"#nf#"e16.v">;
    def VLSEG#nf#E32_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth32, "vlseg"#nf#"e32.v">;
    def VLSEG#nf#E64_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth64, "vlseg"#nf#"e64.v">;
    def VLSEG#nf#E128_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth128, "vlseg"#nf#"e128.v">;
    def VLSEG#nf#E256_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth256, "vlseg"#nf#"e256.v">;
    def VLSEG#nf#E512_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth512, "vlseg"#nf#"e512.v">;
    def VLSEG#nf#E1024_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth1024, "vlseg"#nf#"e1024.v">;

    def VLSEG#nf#E8FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth8, "vlseg"#nf#"e8ff.v">;
    def VLSEG#nf#E16FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth16, "vlseg"#nf#"e16ff.v">;
    def VLSEG#nf#E32FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth32, "vlseg"#nf#"e32ff.v">;
    def VLSEG#nf#E64FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth64, "vlseg"#nf#"e64ff.v">;
    def VLSEG#nf#E128FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth128, "vlseg"#nf#"e128ff.v">;
    def VLSEG#nf#E256FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth256, "vlseg"#nf#"e256ff.v">;
    def VLSEG#nf#E512FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth512, "vlseg"#nf#"e512ff.v">;
    def VLSEG#nf#E1024FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth1024, "vlseg"#nf#"e1024ff.v">;

    def VSSEG#nf#E8_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth8, "vsseg"#nf#"e8.v">;
    def VSSEG#nf#E16_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth16, "vsseg"#nf#"e16.v">;
    def VSSEG#nf#E32_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth32, "vsseg"#nf#"e32.v">;
    def VSSEG#nf#E64_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">;
    def VSSEG#nf#E128_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth128, "vsseg"#nf#"e128.v">;
    def VSSEG#nf#E256_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth256, "vsseg"#nf#"e256.v">;
    def VSSEG#nf#E512_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth512, "vsseg"#nf#"e512.v">;
    def VSSEG#nf#E1024_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth1024, "vsseg"#nf#"e1024.v">;

    // Vector Strided Instructions
    def VLSSEG#nf#E8_V : VStridedSegmentLoad<!add(nf, -1), LSWidth8, "vlsseg"#nf#"e8.v">;
    def VLSSEG#nf#E16_V : VStridedSegmentLoad<!add(nf, -1), LSWidth16, "vlsseg"#nf#"e16.v">;
    def VLSSEG#nf#E32_V : VStridedSegmentLoad<!add(nf, -1), LSWidth32, "vlsseg"#nf#"e32.v">;
    def VLSSEG#nf#E64_V : VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">;
    def VLSSEG#nf#E128_V : VStridedSegmentLoad<!add(nf, -1), LSWidth128, "vlsseg"#nf#"e128.v">;
    def VLSSEG#nf#E256_V : VStridedSegmentLoad<!add(nf, -1), LSWidth256, "vlsseg"#nf#"e256.v">;
    def VLSSEG#nf#E512_V : VStridedSegmentLoad<!add(nf, -1), LSWidth512, "vlsseg"#nf#"e512.v">;
    def VLSSEG#nf#E1024_V : VStridedSegmentLoad<!add(nf, -1), LSWidth1024, "vlsseg"#nf#"e1024.v">;

    def VSSSEG#nf#E8_V : VStridedSegmentStore<!add(nf, -1), LSWidth8, "vssseg"#nf#"e8.v">;
    def VSSSEG#nf#E16_V : VStridedSegmentStore<!add(nf, -1), LSWidth16, "vssseg"#nf#"e16.v">;
    def VSSSEG#nf#E32_V : VStridedSegmentStore<!add(nf, -1), LSWidth32, "vssseg"#nf#"e32.v">;
    def VSSSEG#nf#E64_V : VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">;
    def VSSSEG#nf#E128_V : VStridedSegmentStore<!add(nf, -1), LSWidth128, "vssseg"#nf#"e128.v">;
    def VSSSEG#nf#E256_V : VStridedSegmentStore<!add(nf, -1), LSWidth256, "vssseg"#nf#"e256.v">;
    def VSSSEG#nf#E512_V : VStridedSegmentStore<!add(nf, -1), LSWidth512, "vssseg"#nf#"e512.v">;
    def VSSSEG#nf#E1024_V : VStridedSegmentStore<!add(nf, -1), LSWidth1024, "vssseg"#nf#"e1024.v">;

    // Vector Indexed Instructions
    def VLUXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                             LSWidth8, "vluxseg"#nf#"ei8.v">;
    def VLUXSEG#nf#EI16_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                              LSWidth16, "vluxseg"#nf#"ei16.v">;
    def VLUXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                              LSWidth32, "vluxseg"#nf#"ei32.v">;
    def VLUXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                              LSWidth64, "vluxseg"#nf#"ei64.v">;
    def VLUXSEG#nf#EI128_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                               LSWidth128, "vluxseg"#nf#"ei128.v">;
    def VLUXSEG#nf#EI256_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                               LSWidth256, "vluxseg"#nf#"ei256.v">;
    def VLUXSEG#nf#EI512_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                               LSWidth512, "vluxseg"#nf#"ei512.v">;
    def VLUXSEG#nf#EI1024_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                               LSWidth1024, "vluxseg"#nf#"ei1024.v">;

    def VLOXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                             LSWidth8, "vloxseg"#nf#"ei8.v">;
    def VLOXSEG#nf#EI16_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                              LSWidth16, "vloxseg"#nf#"ei16.v">;
    def VLOXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                              LSWidth32, "vloxseg"#nf#"ei32.v">;
    def VLOXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                              LSWidth64, "vloxseg"#nf#"ei64.v">;
    def VLOXSEG#nf#EI128_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                               LSWidth128, "vloxseg"#nf#"ei128.v">;
    def VLOXSEG#nf#EI256_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                               LSWidth256, "vloxseg"#nf#"ei256.v">;
    def VLOXSEG#nf#EI512_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                               LSWidth512, "vloxseg"#nf#"ei512.v">;
    def VLOXSEG#nf#EI1024_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                               LSWidth1024, "vloxseg"#nf#"ei1024.v">;

    def VSUXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                             LSWidth8, "vsuxseg"#nf#"ei8.v">;
    def VSUXSEG#nf#EI16_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                              LSWidth16, "vsuxseg"#nf#"ei16.v">;
    def VSUXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                              LSWidth32, "vsuxseg"#nf#"ei32.v">;
    def VSUXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                              LSWidth64, "vsuxseg"#nf#"ei64.v">;
    def VSUXSEG#nf#EI128_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                               LSWidth128, "vsuxseg"#nf#"ei128.v">;
    def VSUXSEG#nf#EI256_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                               LSWidth256, "vsuxseg"#nf#"ei256.v">;
    def VSUXSEG#nf#EI512_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                               LSWidth512, "vsuxseg"#nf#"ei512.v">;
    def VSUXSEG#nf#EI1024_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                                LSWidth1024, "vsuxseg"#nf#"ei1024.v">;

    def VSOXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                             LSWidth8, "vsoxseg"#nf#"ei8.v">;
    def VSOXSEG#nf#EI16_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                              LSWidth16, "vsoxseg"#nf#"ei16.v">;
    def VSOXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                              LSWidth32, "vsoxseg"#nf#"ei32.v">;
    def VSOXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                              LSWidth64, "vsoxseg"#nf#"ei64.v">;
    def VSOXSEG#nf#EI128_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                               LSWidth128, "vsoxseg"#nf#"ei128.v">;
    def VSOXSEG#nf#EI256_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                               LSWidth256, "vsoxseg"#nf#"ei256.v">;
    def VSOXSEG#nf#EI512_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                               LSWidth512, "vsoxseg"#nf#"ei512.v">;
    def VSOXSEG#nf#EI1024_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                                LSWidth1024, "vsoxseg"#nf#"ei1024.v">;
  }
} // Predicates = [HasStdExtZvlsseg]

let Predicates = [HasStdExtZvamo, HasStdExtA] in {
  defm VAMOSWAPEI8 : VAMO<AMOOPVamoSwap, LSWidth8, "vamoswapei8.v">;
  defm VAMOSWAPEI16 : VAMO<AMOOPVamoSwap, LSWidth16, "vamoswapei16.v">;
  defm VAMOSWAPEI32 : VAMO<AMOOPVamoSwap, LSWidth32, "vamoswapei32.v">;

  defm VAMOADDEI8 : VAMO<AMOOPVamoAdd, LSWidth8, "vamoaddei8.v">;
  defm VAMOADDEI16 : VAMO<AMOOPVamoAdd, LSWidth16, "vamoaddei16.v">;
  defm VAMOADDEI32 : VAMO<AMOOPVamoAdd, LSWidth32, "vamoaddei32.v">;

  defm VAMOXOREI8 : VAMO<AMOOPVamoXor, LSWidth8, "vamoxorei8.v">;
  defm VAMOXOREI16 : VAMO<AMOOPVamoXor, LSWidth16, "vamoxorei16.v">;
  defm VAMOXOREI32 : VAMO<AMOOPVamoXor, LSWidth32, "vamoxorei32.v">;

  defm VAMOANDEI8 : VAMO<AMOOPVamoAnd, LSWidth8, "vamoandei8.v">;
  defm VAMOANDEI16 : VAMO<AMOOPVamoAnd, LSWidth16, "vamoandei16.v">;
  defm VAMOANDEI32 : VAMO<AMOOPVamoAnd, LSWidth32, "vamoandei32.v">;

  defm VAMOOREI8 : VAMO<AMOOPVamoOr, LSWidth8, "vamoorei8.v">;
  defm VAMOOREI16 : VAMO<AMOOPVamoOr, LSWidth16, "vamoorei16.v">;
  defm VAMOOREI32 : VAMO<AMOOPVamoOr, LSWidth32, "vamoorei32.v">;

  defm VAMOMINEI8 : VAMO<AMOOPVamoMin, LSWidth8, "vamominei8.v">;
  defm VAMOMINEI16 : VAMO<AMOOPVamoMin, LSWidth16, "vamominei16.v">;
  defm VAMOMINEI32 : VAMO<AMOOPVamoMin, LSWidth32, "vamominei32.v">;

  defm VAMOMAXEI8 : VAMO<AMOOPVamoMax, LSWidth8, "vamomaxei8.v">;
  defm VAMOMAXEI16 : VAMO<AMOOPVamoMax, LSWidth16, "vamomaxei16.v">;
  defm VAMOMAXEI32 : VAMO<AMOOPVamoMax, LSWidth32, "vamomaxei32.v">;

  defm VAMOMINUEI8 : VAMO<AMOOPVamoMinu, LSWidth8, "vamominuei8.v">;
  defm VAMOMINUEI16 : VAMO<AMOOPVamoMinu, LSWidth16, "vamominuei16.v">;
  defm VAMOMINUEI32 : VAMO<AMOOPVamoMinu, LSWidth32, "vamominuei32.v">;

  defm VAMOMAXUEI8 : VAMO<AMOOPVamoMaxu, LSWidth8, "vamomaxuei8.v">;
  defm VAMOMAXUEI16 : VAMO<AMOOPVamoMaxu, LSWidth16, "vamomaxuei16.v">;
  defm VAMOMAXUEI32 : VAMO<AMOOPVamoMaxu, LSWidth32, "vamomaxuei32.v">;
} // Predicates = [HasStdExtZvamo, HasStdExtA]

let Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64] in {
  defm VAMOSWAPEI64 : VAMO<AMOOPVamoSwap, LSWidth64, "vamoswapei64.v">;
  defm VAMOADDEI64 : VAMO<AMOOPVamoAdd, LSWidth64, "vamoaddei64.v">;
  defm VAMOXOREI64 : VAMO<AMOOPVamoXor, LSWidth64, "vamoxorei64.v">;
  defm VAMOANDEI64 : VAMO<AMOOPVamoAnd, LSWidth64, "vamoandei64.v">;
  defm VAMOOREI64 : VAMO<AMOOPVamoOr, LSWidth64, "vamoorei64.v">;
  defm VAMOMINEI64 : VAMO<AMOOPVamoMin, LSWidth64, "vamominei64.v">;
  defm VAMOMAXEI64 : VAMO<AMOOPVamoMax, LSWidth64, "vamomaxei64.v">;
  defm VAMOMINUEI64 : VAMO<AMOOPVamoMinu, LSWidth64, "vamominuei64.v">;
  defm VAMOMAXUEI64 : VAMO<AMOOPVamoMaxu, LSWidth64, "vamomaxuei64.v">;
} // Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64]

include "RISCVInstrInfoVPseudos.td"