1//===-- RISCVRegisterInfo.td - RISC-V Register defs --------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9//===----------------------------------------------------------------------===//
10//  Declarations that describe the RISC-V register files
11//===----------------------------------------------------------------------===//
12
let Namespace = "RISCV" in {
// An XLEN-wide integer register. Enc is the 5-bit hardware encoding
// (x0..x31); alt lists ABI alternate names (e.g. "sp" for x2).
class RISCVReg<bits<5> Enc, string n, list<string> alt = []> : Register<n> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

// A 16-bit (half-precision) view of a floating-point register; uses the same
// 5-bit encoding scheme as RISCVReg.
class RISCVReg16<bits<5> Enc, string n, list<string> alt = []> : Register<n> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

// Index of the low 16 bits within a wider FP register.
def sub_16 : SubRegIndex<16>;
// A 32-bit (single-precision) FP register layered over its 16-bit
// sub-register; the encoding, assembly name and alternate names are all
// inherited from the sub-register.
class RISCVReg32<RISCVReg16 subreg> : Register<""> {
  let HWEncoding{4-0} = subreg.HWEncoding{4-0};
  let SubRegs = [subreg];
  let SubRegIndices = [sub_16];
  let AsmName = subreg.AsmName;
  let AltNames = subreg.AltNames;
}

// Because RISCVReg64 registers have an AsmName and AltNames that alias with
// their 16/32-bit sub-registers, RISCVAsmParser will need to coerce a
// register number from a RISCVReg16/RISCVReg32 to the equivalent RISCVReg64
// when appropriate.
def sub_32 : SubRegIndex<32>;
// A 64-bit (double-precision) FP register layered over its 32-bit
// sub-register, mirroring RISCVReg32 above.
class RISCVReg64<RISCVReg32 subreg> : Register<""> {
  let HWEncoding{4-0} = subreg.HWEncoding{4-0};
  let SubRegs = [subreg];
  let SubRegIndices = [sub_32];
  let AsmName = subreg.AsmName;
  let AltNames = subreg.AltNames;
}

// A register with an explicit list of sub-registers; used below for the
// vector register groups (V*M2/V*M4/V*M8).
class RISCVRegWithSubRegs<bits<5> Enc, string n, list<Register> subregs,
                          list<string> alt = []>
      : RegisterWithSubRegs<n, subregs> {
  let HWEncoding{4-0} = Enc;
  let AltNames = alt;
}

// Alternate-name index so registers can be printed/parsed by ABI name.
def ABIRegAltName : RegAltNameIndex;

// Sub-register indices for vector register groups, with sizes/offsets in
// bits assuming the minimum VLEN of 64. sub_vrmK_J addresses the J'th
// LMUL=K sub-group; the composed indices reach an LMUL=1 or LMUL=2 part
// that sits inside the upper half of a larger group.
def sub_vrm4_0 : SubRegIndex<256>;
def sub_vrm4_1 : SubRegIndex<256, 256>;
def sub_vrm2_0 : SubRegIndex<128>;
def sub_vrm2_1 : SubRegIndex<128, 128>;
def sub_vrm2_2 : ComposedSubRegIndex<sub_vrm4_1, sub_vrm2_0>;
def sub_vrm2_3 : ComposedSubRegIndex<sub_vrm4_1, sub_vrm2_1>;
def sub_vrm1_0 : SubRegIndex<64>;
def sub_vrm1_1 : SubRegIndex<64, 64>;
def sub_vrm1_2 : ComposedSubRegIndex<sub_vrm2_1, sub_vrm1_0>;
def sub_vrm1_3 : ComposedSubRegIndex<sub_vrm2_1, sub_vrm1_1>;
def sub_vrm1_4 : ComposedSubRegIndex<sub_vrm2_2, sub_vrm1_0>;
def sub_vrm1_5 : ComposedSubRegIndex<sub_vrm2_2, sub_vrm1_1>;
def sub_vrm1_6 : ComposedSubRegIndex<sub_vrm2_3, sub_vrm1_0>;
def sub_vrm1_7 : ComposedSubRegIndex<sub_vrm2_3, sub_vrm1_1>;

} // Namespace = "RISCV"
70
71// Integer registers
72// CostPerUse is set higher for registers that may not be compressible as they
73// are not part of GPRC, the most restrictive register class used by the
74// compressed instruction set. This will influence the greedy register
75// allocator to reduce the use of registers that can't be encoded in 16 bit
// instructions. This affects register allocation even when the compressed
// instruction set isn't targeted; we see no major negative codegen impact.
78
let RegAltNameIndices = [ABIRegAltName] in {
  // x0 is hardwired to zero in the ISA.
  def X0  : RISCVReg<0, "x0", ["zero"]>, DwarfRegNum<[0]>;
  // x1-x7 are not in GPRC, so they get a higher allocation cost (see the
  // CostPerUse comment above).
  let CostPerUse = [1] in {
  def X1  : RISCVReg<1, "x1", ["ra"]>, DwarfRegNum<[1]>;
  def X2  : RISCVReg<2, "x2", ["sp"]>, DwarfRegNum<[2]>;
  def X3  : RISCVReg<3, "x3", ["gp"]>, DwarfRegNum<[3]>;
  def X4  : RISCVReg<4, "x4", ["tp"]>, DwarfRegNum<[4]>;
  def X5  : RISCVReg<5, "x5", ["t0"]>, DwarfRegNum<[5]>;
  def X6  : RISCVReg<6, "x6", ["t1"]>, DwarfRegNum<[6]>;
  def X7  : RISCVReg<7, "x7", ["t2"]>, DwarfRegNum<[7]>;
  }
  // x8-x15 are the registers encodable by compressed instructions, so they
  // keep the default (zero) CostPerUse.
  def X8  : RISCVReg<8, "x8", ["s0", "fp"]>, DwarfRegNum<[8]>;
  def X9  : RISCVReg<9, "x9", ["s1"]>, DwarfRegNum<[9]>;
  def X10 : RISCVReg<10,"x10", ["a0"]>, DwarfRegNum<[10]>;
  def X11 : RISCVReg<11,"x11", ["a1"]>, DwarfRegNum<[11]>;
  def X12 : RISCVReg<12,"x12", ["a2"]>, DwarfRegNum<[12]>;
  def X13 : RISCVReg<13,"x13", ["a3"]>, DwarfRegNum<[13]>;
  def X14 : RISCVReg<14,"x14", ["a4"]>, DwarfRegNum<[14]>;
  def X15 : RISCVReg<15,"x15", ["a5"]>, DwarfRegNum<[15]>;
  // x16-x31 are likewise outside GPRC.
  let CostPerUse = [1] in {
  def X16 : RISCVReg<16,"x16", ["a6"]>, DwarfRegNum<[16]>;
  def X17 : RISCVReg<17,"x17", ["a7"]>, DwarfRegNum<[17]>;
  def X18 : RISCVReg<18,"x18", ["s2"]>, DwarfRegNum<[18]>;
  def X19 : RISCVReg<19,"x19", ["s3"]>, DwarfRegNum<[19]>;
  def X20 : RISCVReg<20,"x20", ["s4"]>, DwarfRegNum<[20]>;
  def X21 : RISCVReg<21,"x21", ["s5"]>, DwarfRegNum<[21]>;
  def X22 : RISCVReg<22,"x22", ["s6"]>, DwarfRegNum<[22]>;
  def X23 : RISCVReg<23,"x23", ["s7"]>, DwarfRegNum<[23]>;
  def X24 : RISCVReg<24,"x24", ["s8"]>, DwarfRegNum<[24]>;
  def X25 : RISCVReg<25,"x25", ["s9"]>, DwarfRegNum<[25]>;
  def X26 : RISCVReg<26,"x26", ["s10"]>, DwarfRegNum<[26]>;
  def X27 : RISCVReg<27,"x27", ["s11"]>, DwarfRegNum<[27]>;
  def X28 : RISCVReg<28,"x28", ["t3"]>, DwarfRegNum<[28]>;
  def X29 : RISCVReg<29,"x29", ["t4"]>, DwarfRegNum<[29]>;
  def X30 : RISCVReg<30,"x30", ["t5"]>, DwarfRegNum<[30]>;
  def X31 : RISCVReg<31,"x31", ["t6"]>, DwarfRegNum<[31]>;
  }
}
117
// XLEN-dependent value type: i32 under RV32, i64 under RV64.
def XLenVT : ValueTypeByHwMode<[RV32, RV64],
                               [i32,  i64]>;
// Register size / spill size / spill alignment all follow XLEN.
def XLenRI : RegInfoByHwMode<
      [RV32,              RV64],
      [RegInfo<32,32,32>, RegInfo<64,64,64>]>;
123
// The order of registers represents the preferred allocation sequence.
// Registers are listed in the order caller-save, callee-save, specials.
def GPR : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 17),
    (sequence "X%u", 5, 7),
    (sequence "X%u", 28, 31),
    (sequence "X%u", 8, 9),
    (sequence "X%u", 18, 27),
    (sequence "X%u", 0, 4)
  )> {
  let RegInfos = XLenRI;
}

// Single-register class containing only x0 (zero).
def GPRX0 : RegisterClass<"RISCV", [XLenVT], 32, (add X0)> {
  let RegInfos = XLenRI;
}

// All GPRs except x0.
// The order of registers represents the preferred allocation sequence.
// Registers are listed in the order caller-save, callee-save, specials.
def GPRNoX0 : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 17),
    (sequence "X%u", 5, 7),
    (sequence "X%u", 28, 31),
    (sequence "X%u", 8, 9),
    (sequence "X%u", 18, 27),
    (sequence "X%u", 1, 4)
  )> {
  let RegInfos = XLenRI;
}

// All GPRs except x0 and x2 (sp); same preferred allocation order as above.
def GPRNoX0X2 : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 17),
    (sequence "X%u", 5, 7),
    (sequence "X%u", 28, 31),
    (sequence "X%u", 8, 9),
    (sequence "X%u", 18, 27),
    X1, X3, X4
  )> {
  let RegInfos = XLenRI;
}
164
// Don't use X1 or X5 for JALR since that is a hint to pop the return address
// stack on some microarchitectures. Also remove the reserved registers X0, X2,
// X3, and X4 as it reduces the number of register classes that get synthesized
// by tablegen.
def GPRJALR : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 17),
    (sequence "X%u", 6, 7),
    (sequence "X%u", 28, 31),
    (sequence "X%u", 8, 9),
    (sequence "X%u", 18, 27)
  )> {
  let RegInfos = XLenRI;
}

// The eight registers (x8-x15) encodable in the 3-bit register fields of
// compressed instructions; caller-save (a0-a5) listed before callee-save.
def GPRC : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 10, 15),
    (sequence "X%u", 8, 9)
  )> {
  let RegInfos = XLenRI;
}

// For indirect tail calls, we can't use callee-saved registers, as they are
// restored to the saved value before the tail call, which would clobber a call
// address. We shouldn't use x5 since that is a hint to pop the return
// address stack on some microarchitectures.
def GPRTC : RegisterClass<"RISCV", [XLenVT], 32, (add
    (sequence "X%u", 6, 7),
    (sequence "X%u", 10, 17),
    (sequence "X%u", 28, 31)
  )> {
  let RegInfos = XLenRI;
}

// Single-register class for the stack pointer (x2).
def SP : RegisterClass<"RISCV", [XLenVT], 32, (add X2)> {
  let RegInfos = XLenRI;
}
201
// Floating point registers
//
// Each FP register is defined at three widths: F*_H (16-bit), F*_F (32-bit)
// and F*_D (64-bit), with the wider views layered over the narrower ones via
// sub_16/sub_32. All three widths of register N share DWARF number N + 32.
let RegAltNameIndices = [ABIRegAltName] in {
  def F0_H  : RISCVReg16<0, "f0", ["ft0"]>, DwarfRegNum<[32]>;
  def F1_H  : RISCVReg16<1, "f1", ["ft1"]>, DwarfRegNum<[33]>;
  def F2_H  : RISCVReg16<2, "f2", ["ft2"]>, DwarfRegNum<[34]>;
  def F3_H  : RISCVReg16<3, "f3", ["ft3"]>, DwarfRegNum<[35]>;
  def F4_H  : RISCVReg16<4, "f4", ["ft4"]>, DwarfRegNum<[36]>;
  def F5_H  : RISCVReg16<5, "f5", ["ft5"]>, DwarfRegNum<[37]>;
  def F6_H  : RISCVReg16<6, "f6", ["ft6"]>, DwarfRegNum<[38]>;
  def F7_H  : RISCVReg16<7, "f7", ["ft7"]>, DwarfRegNum<[39]>;
  def F8_H  : RISCVReg16<8, "f8", ["fs0"]>, DwarfRegNum<[40]>;
  def F9_H  : RISCVReg16<9, "f9", ["fs1"]>, DwarfRegNum<[41]>;
  def F10_H : RISCVReg16<10,"f10", ["fa0"]>, DwarfRegNum<[42]>;
  def F11_H : RISCVReg16<11,"f11", ["fa1"]>, DwarfRegNum<[43]>;
  def F12_H : RISCVReg16<12,"f12", ["fa2"]>, DwarfRegNum<[44]>;
  def F13_H : RISCVReg16<13,"f13", ["fa3"]>, DwarfRegNum<[45]>;
  def F14_H : RISCVReg16<14,"f14", ["fa4"]>, DwarfRegNum<[46]>;
  def F15_H : RISCVReg16<15,"f15", ["fa5"]>, DwarfRegNum<[47]>;
  def F16_H : RISCVReg16<16,"f16", ["fa6"]>, DwarfRegNum<[48]>;
  def F17_H : RISCVReg16<17,"f17", ["fa7"]>, DwarfRegNum<[49]>;
  def F18_H : RISCVReg16<18,"f18", ["fs2"]>, DwarfRegNum<[50]>;
  def F19_H : RISCVReg16<19,"f19", ["fs3"]>, DwarfRegNum<[51]>;
  def F20_H : RISCVReg16<20,"f20", ["fs4"]>, DwarfRegNum<[52]>;
  def F21_H : RISCVReg16<21,"f21", ["fs5"]>, DwarfRegNum<[53]>;
  def F22_H : RISCVReg16<22,"f22", ["fs6"]>, DwarfRegNum<[54]>;
  def F23_H : RISCVReg16<23,"f23", ["fs7"]>, DwarfRegNum<[55]>;
  def F24_H : RISCVReg16<24,"f24", ["fs8"]>, DwarfRegNum<[56]>;
  def F25_H : RISCVReg16<25,"f25", ["fs9"]>, DwarfRegNum<[57]>;
  def F26_H : RISCVReg16<26,"f26", ["fs10"]>, DwarfRegNum<[58]>;
  def F27_H : RISCVReg16<27,"f27", ["fs11"]>, DwarfRegNum<[59]>;
  def F28_H : RISCVReg16<28,"f28", ["ft8"]>, DwarfRegNum<[60]>;
  def F29_H : RISCVReg16<29,"f29", ["ft9"]>, DwarfRegNum<[61]>;
  def F30_H : RISCVReg16<30,"f30", ["ft10"]>, DwarfRegNum<[62]>;
  def F31_H : RISCVReg16<31,"f31", ["ft11"]>, DwarfRegNum<[63]>;

  // 32-bit (single-precision) views over the _H registers.
  foreach Index = 0-31 in {
    def F#Index#_F : RISCVReg32<!cast<RISCVReg16>("F"#Index#"_H")>,
      DwarfRegNum<[!add(Index, 32)]>;
  }

  // 64-bit (double-precision) views over the _F registers.
  foreach Index = 0-31 in {
    def F#Index#_D : RISCVReg64<!cast<RISCVReg32>("F"#Index#"_F")>,
      DwarfRegNum<[!add(Index, 32)]>;
  }
}
247
// The order of registers represents the preferred allocation sequence,
// meaning caller-save regs are listed before callee-save.
def FPR16 : RegisterClass<"RISCV", [f16], 16, (add
    (sequence "F%u_H", 0, 7),
    (sequence "F%u_H", 10, 17),
    (sequence "F%u_H", 28, 31),
    (sequence "F%u_H", 8, 9),
    (sequence "F%u_H", 18, 27)
)>;

// Same allocation order at single-precision width.
def FPR32 : RegisterClass<"RISCV", [f32], 32, (add
    (sequence "F%u_F", 0, 7),
    (sequence "F%u_F", 10, 17),
    (sequence "F%u_F", 28, 31),
    (sequence "F%u_F", 8, 9),
    (sequence "F%u_F", 18, 27)
)>;

// Single-precision registers encodable by compressed FP instructions
// (f8-f15, mirroring GPRC).
def FPR32C : RegisterClass<"RISCV", [f32], 32, (add
  (sequence "F%u_F", 10, 15),
  (sequence "F%u_F", 8, 9)
)>;

// The order of registers represents the preferred allocation sequence,
// meaning caller-save regs are listed before callee-save.
def FPR64 : RegisterClass<"RISCV", [f64], 64, (add
    (sequence "F%u_D", 0, 7),
    (sequence "F%u_D", 10, 17),
    (sequence "F%u_D", 28, 31),
    (sequence "F%u_D", 8, 9),
    (sequence "F%u_D", 18, 27)
)>;

// Double-precision registers encodable by compressed FP instructions.
def FPR64C : RegisterClass<"RISCV", [f64], 64, (add
  (sequence "F%u_D", 10, 15),
  (sequence "F%u_D", 8, 9)
)>;
285
286// Vector type mapping to LLVM types.
287//
288// The V vector extension requires that VLEN >= 128 and <= 65536.
289// Additionally, the only supported ELEN values are 32 and 64,
290// thus `vscale` can be defined as VLEN/64,
291// allowing the same types with either ELEN value.
292//
293//         MF8    MF4     MF2     M1      M2      M4       M8
294// i64*    N/A    N/A     N/A     nxv1i64 nxv2i64 nxv4i64  nxv8i64
295// i32     N/A    N/A     nxv1i32 nxv2i32 nxv4i32 nxv8i32  nxv16i32
296// i16     N/A    nxv1i16 nxv2i16 nxv4i16 nxv8i16 nxv16i16 nxv32i16
297// i8      nxv1i8 nxv2i8  nxv4i8  nxv8i8  nxv16i8 nxv32i8  nxv64i8
298// double* N/A    N/A     N/A     nxv1f64 nxv2f64 nxv4f64  nxv8f64
299// float   N/A    N/A     nxv1f32 nxv2f32 nxv4f32 nxv8f32  nxv16f32
300// half    N/A    nxv1f16 nxv2f16 nxv4f16 nxv8f16 nxv16f16 nxv32f16
301// * ELEN=64
302
// Aliases mapping the spec's v<ELT><LMUL>_t naming onto LLVM scalable vector
// types, following the table above (vscale = VLEN/64).
defvar vint8mf8_t = nxv1i8;
defvar vint8mf4_t = nxv2i8;
defvar vint8mf2_t = nxv4i8;
defvar vint8m1_t = nxv8i8;
defvar vint8m2_t = nxv16i8;
defvar vint8m4_t = nxv32i8;
defvar vint8m8_t = nxv64i8;

defvar vint16mf4_t = nxv1i16;
defvar vint16mf2_t = nxv2i16;
defvar vint16m1_t  = nxv4i16;
defvar vint16m2_t  = nxv8i16;
defvar vint16m4_t  = nxv16i16;
defvar vint16m8_t  = nxv32i16;

defvar vint32mf2_t = nxv1i32;
defvar vint32m1_t  = nxv2i32;
defvar vint32m2_t  = nxv4i32;
defvar vint32m4_t  = nxv8i32;
defvar vint32m8_t  = nxv16i32;

defvar vint64m1_t = nxv1i64;
defvar vint64m2_t = nxv2i64;
defvar vint64m4_t = nxv4i64;
defvar vint64m8_t = nxv8i64;

defvar vfloat16mf4_t = nxv1f16;
defvar vfloat16mf2_t = nxv2f16;
defvar vfloat16m1_t  = nxv4f16;
defvar vfloat16m2_t  = nxv8f16;
defvar vfloat16m4_t  = nxv16f16;
defvar vfloat16m8_t  = nxv32f16;

defvar vfloat32mf2_t = nxv1f32;
defvar vfloat32m1_t  = nxv2f32;
defvar vfloat32m2_t  = nxv4f32;
defvar vfloat32m4_t  = nxv8f32;
defvar vfloat32m8_t  = nxv16f32;

defvar vfloat64m1_t = nxv1f64;
defvar vfloat64m2_t = nxv2f64;
defvar vfloat64m4_t = nxv4f64;
defvar vfloat64m8_t = nxv8f64;

// Mask types: vbool<N>_t has one mask bit per N data bits.
defvar vbool1_t  = nxv64i1;
defvar vbool2_t  = nxv32i1;
defvar vbool4_t  = nxv16i1;
defvar vbool8_t  = nxv8i1;
defvar vbool16_t = nxv4i1;
defvar vbool32_t = nxv2i1;
defvar vbool64_t = nxv1i1;

// There is no need to define register classes for fractional LMUL.
def LMULList {
  list<int> m = [1, 2, 4, 8];
}
359
//===----------------------------------------------------------------------===//
// Utility classes for segment load/store.
//===----------------------------------------------------------------------===//
// The set of legal NF for LMUL = lmul.
// LMUL == 1, NF = 2, 3, 4, 5, 6, 7, 8
// LMUL == 2, NF = 2, 3, 4
// LMUL == 4, NF = 2
// (NF * LMUL must not exceed 8; LMUL == 8 therefore has no legal NF.)
class NFList<int lmul> {
  list<int> L = !cond(!eq(lmul, 1): [2, 3, 4, 5, 6, 7, 8],
                      !eq(lmul, 2): [2, 3, 4],
                      !eq(lmul, 4): [2],
                      !eq(lmul, 8): []);
}

// Generate the SubRegIndex list sub_vrm<lmul>_0 .. sub_vrm<lmul>_<nf-1>,
// i.e. one index per field of an NF-field tuple.
// NOTE(review): the LIn and start parameters are not used by the body.
class SubRegSet<list<SubRegIndex> LIn, int start, int nf, int lmul> {
  list<SubRegIndex> L = !foldl([]<SubRegIndex>,
                               [0, 1, 2, 3, 4, 5, 6, 7],
                               AccList, i,
                               !listconcat(AccList,
                                 !if(!lt(i, nf),
                                   [!cast<SubRegIndex>("sub_vrm" # lmul # "_" # i)],
                                   [])));
}

// The list of vector register numbers i*lmul that can hold field `index` of
// an NF-field tuple: i*lmul must be at least index*lmul and small enough that
// the remaining nf-index fields still fit below v31
// (i*lmul <= 32 + index*lmul - nf*lmul).
class IndexSet<int index, int nf, int lmul> {
  list<int> R =
    !foldl([]<int>,
              [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
               13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
               23, 24, 25, 26, 27, 28, 29, 30, 31],
              L, i,
              !listconcat(L,
                          !if(!and(
                                !le(!mul(index, lmul), !mul(i, lmul)),
                                !le(!mul(i, lmul),
                                    !sub(!add(32, !mul(index, lmul)), !mul(nf, lmul)))
                              ), [!mul(i, lmul)], [])));
}
399
// Recursively build the per-field register lists for a RegisterTuples def:
// one `(add ...)` dag per tuple field `start` in [0, nf). For each field,
// the candidate registers come from IndexSet; when NoV0 is set the head of
// that list (the tuple overlapping V0) is dropped, otherwise ONLY the head
// is kept — so the "V0" tuple classes below contain exactly the single tuple
// that includes V0, and the "NoV0" classes contain all the others. The
// !listsplat of "" supplies empty per-register name suffixes of matching
// length, as RegisterTuples requires.
class VRegList<list<dag> LIn, int start, int nf, int lmul, bit NoV0> {
  list<dag> L =
    !if(!ge(start, nf),
        LIn,
        !listconcat(
          [!dag(add,
                !foreach(i,
                  !if(NoV0,
                    !tail(IndexSet<start, nf, lmul>.R),
                    [!head(IndexSet<start, nf, lmul>.R)]),
                  !cast<Register>("V" # i # !cond(!eq(lmul, 2): "M2",
                                                  !eq(lmul, 4): "M4",
                                                  true: ""))),
                !listsplat("",
                  !if(NoV0,
                    !size(!tail(IndexSet<start, nf, lmul>.R)),
                    !size([!head(IndexSet<start, nf, lmul>.R)]))))],
          VRegList<LIn, !add(start, 1), nf, lmul, NoV0>.L));
}
419
// Vector registers
let RegAltNameIndices = [ABIRegAltName] in {
  // v0-v31, DWARF numbers 96-127. The ABI name is the same as the
  // architectural name.
  foreach Index = 0-31 in {
    def V#Index : RISCVReg<Index, "v"#Index, ["v"#Index]>, DwarfRegNum<[!add(Index, 96)]>;
  }

  // LMUL=2 register groups: each even-numbered V<n>M2 covers V<n> and
  // V<n+1>, and aliases the base register's DWARF number.
  foreach Index = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22,
                   24, 26, 28, 30] in {
    def V#Index#M2 : RISCVRegWithSubRegs<Index, "v"#Index,
                       [!cast<Register>("V"#Index),
                        !cast<Register>("V"#!add(Index, 1))],
                       ["v"#Index]>,
                     DwarfRegAlias<!cast<Register>("V"#Index)> {
      let SubRegIndices = [sub_vrm1_0, sub_vrm1_1];
    }
  }

  // LMUL=4 register groups, built from pairs of M2 groups.
  foreach Index = [0, 4, 8, 12, 16, 20, 24, 28] in {
    def V#Index#M4 : RISCVRegWithSubRegs<Index, "v"#Index,
                       [!cast<Register>("V"#Index#"M2"),
                        !cast<Register>("V"#!add(Index, 2)#"M2")],
                       ["v"#Index]>,
                     DwarfRegAlias<!cast<Register>("V"#Index)> {
      let SubRegIndices = [sub_vrm2_0, sub_vrm2_1];
    }
  }

  // LMUL=8 register groups, built from pairs of M4 groups.
  foreach Index = [0, 8, 16, 24] in {
    def V#Index#M8 : RISCVRegWithSubRegs<Index, "v"#Index,
                       [!cast<Register>("V"#Index#"M4"),
                        !cast<Register>("V"#!add(Index, 4)#"M4")],
                       ["v"#Index]>,
                     DwarfRegAlias<!cast<Register>("V"#Index)> {
      let SubRegIndices = [sub_vrm4_0, sub_vrm4_1];
    }
  }

  // Vector CSRs modeled as registers (the 5-bit encoding field is unused
  // here, so it is simply 0 for all of them).
  def VTYPE  : RISCVReg<0, "vtype", ["vtype"]>;
  def VL     : RISCVReg<0, "vl", ["vl"]>;
  def VXSAT  : RISCVReg<0, "vxsat", ["vxsat"]>;
  def VXRM   : RISCVReg<0, "vxrm", ["vxrm"]>;
}
462
// For every legal (NF, LMUL) pair, define two RegisterTuples sets:
// VN<nf>M<m>V0 holds only the tuple that overlaps V0, and VN<nf>M<m>NoV0
// holds all the remaining tuples (see VRegList above).
foreach m = [1, 2, 4] in {
  foreach n = NFList<m>.L in {
    def "VN" # n # "M" # m # "NoV0": RegisterTuples<
                                       SubRegSet<[], 0, n, m>.L,
                                       VRegList<[], 0, n, m, 1>.L>;
    def "VN" # n # "M" # m # "V0" : RegisterTuples<
                                       SubRegSet<[], 0, n, m>.L,
                                       VRegList<[], 0, n, m, 0>.L>;
  }
}

// Common base for vector register classes. Vlmul is the register-group size
// in vector registers; Size is in bits assuming the minimum VLEN of 64.
class VReg<list<ValueType> regTypes, dag regList, int Vlmul>
  : RegisterClass<"RISCV",
                  regTypes,
                  64, // The maximum supported ELEN is 64.
                  regList> {
  int VLMul = Vlmul;
  int Size = !mul(Vlmul, 64);
}
482
// LMUL=1 class over all 32 vector registers. The allocation order puts
// v25-v31 first, then v8-v24, with v0-v7 last (v0 is the mask register, so
// it and its neighbors are deprioritized).
def VR : VReg<[vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
               vfloat16m1_t, vfloat32m1_t, vfloat64m1_t,
               vint8mf2_t, vint8mf4_t, vint8mf8_t,
               vint16mf2_t, vint16mf4_t, vint32mf2_t,
               vfloat16mf4_t, vfloat16mf2_t, vfloat32mf2_t,
               vbool64_t, vbool32_t, vbool16_t, vbool8_t, vbool4_t,
               vbool2_t, vbool1_t],
           (add (sequence "V%u", 25, 31),
                (sequence "V%u", 8, 24),
                (sequence "V%u", 0, 7)), 1>;

// As VR, but excluding v0 (for operations where v0 carries the mask).
def VRNoV0 : VReg<[vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,
                   vfloat16m1_t, vfloat32m1_t, vfloat64m1_t,
                   vint8mf2_t, vint8mf4_t, vint8mf8_t,
                   vint16mf2_t, vint16mf4_t, vint32mf2_t,
                   vfloat16mf4_t, vfloat16mf2_t, vfloat32mf2_t,
                   vbool64_t, vbool32_t, vbool16_t, vbool8_t, vbool4_t,
                   vbool2_t, vbool1_t],
               (add (sequence "V%u", 25, 31),
                    (sequence "V%u", 8, 24),
                    (sequence "V%u", 1, 7)), 1>;

// LMUL=2 register groups, same relative allocation preference as VR.
def VRM2 : VReg<[vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t,
                 vfloat16m2_t, vfloat32m2_t, vfloat64m2_t],
             (add V26M2, V28M2, V30M2, V8M2, V10M2, V12M2, V14M2, V16M2,
                  V18M2, V20M2, V22M2, V24M2, V0M2, V2M2, V4M2, V6M2), 2>;

// As VRM2, but excluding the group containing v0.
def VRM2NoV0 : VReg<[vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t,
                     vfloat16m2_t, vfloat32m2_t, vfloat64m2_t],
                 (add V26M2, V28M2, V30M2, V8M2, V10M2, V12M2, V14M2, V16M2,
                      V18M2, V20M2, V22M2, V24M2, V2M2, V4M2, V6M2), 2>;

// LMUL=4 register groups.
def VRM4 : VReg<[vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t,
                 vfloat16m4_t, vfloat32m4_t, vfloat64m4_t],
             (add V28M4, V8M4, V12M4, V16M4, V20M4, V24M4, V0M4, V4M4), 4>;

// As VRM4, but excluding the group containing v0.
def VRM4NoV0 : VReg<[vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t,
                     vfloat16m4_t, vfloat32m4_t, vfloat64m4_t],
                 (add V28M4, V8M4, V12M4, V16M4, V20M4, V24M4, V4M4), 4>;

// LMUL=8 register groups.
def VRM8 : VReg<[vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
                 vfloat16m8_t, vfloat32m8_t, vfloat64m8_t],
             (add V8M8, V16M8, V24M8, V0M8), 8>;

// As VRM8, but excluding the group containing v0.
def VRM8NoV0 : VReg<[vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
                     vfloat16m8_t, vfloat32m8_t, vfloat64m8_t],
                 (add V8M8, V16M8, V24M8), 8>;

// All mask value types supported by the register classes below.
defvar VMaskVTs = [vbool64_t, vbool32_t, vbool16_t, vbool8_t,
                   vbool4_t, vbool2_t, vbool1_t];

// Single-register class for v0, the only register that can hold a mask
// operand. Size is in bits assuming the minimum VLEN of 64.
def VMV0 : RegisterClass<"RISCV", VMaskVTs, 64, (add V0)> {
  let Size = 64;
}

// The register class is added for inline assembly for vector mask types.
def VM : VReg<[vbool1_t, vbool2_t, vbool4_t, vbool8_t, vbool16_t,
               vbool32_t, vbool64_t],
           (add (sequence "V%u", 25, 31),
                (sequence "V%u", 8, 24),
                (sequence "V%u", 0, 7)), 1>;
544
// Segment load/store tuple register classes. VRN<nf>M<m> contains every
// legal tuple (the V0-overlapping one first, then the rest); the NoV0
// variant omits the tuple containing v0.
foreach m = LMULList.m in {
  foreach nf = NFList<m>.L in {
    def "VRN" # nf # "M" # m: VReg<[untyped],
                               (add !cast<RegisterTuples>("VN" # nf # "M" # m # "V0"), !cast<RegisterTuples>("VN" # nf # "M" # m # "NoV0")),
                                    !mul(nf, m)>;
    def "VRN" # nf # "M" # m # "NoV0": VReg<[untyped],
                               (add !cast<RegisterTuples>("VN" # nf # "M" # m # "NoV0")),
                                    !mul(nf, m)>;
  }
}
555
// Special registers: the floating-point control/status CSRs, modeled as
// registers (the 5-bit encoding field is unused here, hence 0).
def FFLAGS : RISCVReg<0, "fflags">;
def FRM    : RISCVReg<0, "frm">;
def FCSR   : RISCVReg<0, "fcsr">;
560