//=- AArch64RegisterInfo.td - Describe the AArch64 Registers -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//


class AArch64Reg<bits<16> enc, string n, list<Register> subregs = [],
               list<string> altNames = []>
        : Register<n, altNames> {
  let HWEncoding = enc;
  let Namespace = "AArch64";
  let SubRegs = subregs;
}

let Namespace = "AArch64" in {
  def sub_32 : SubRegIndex<32>;

  def bsub : SubRegIndex<8>;
  def hsub : SubRegIndex<16>;
  def ssub : SubRegIndex<32>;
  def dsub : SubRegIndex<64>;
  def sube32 : SubRegIndex<32>;
  def subo32 : SubRegIndex<32>;
  def sube64 : SubRegIndex<64>;
  def subo64 : SubRegIndex<64>;
  // SVE
  def zsub    : SubRegIndex<128>;
  // Note: zsub_hi should never be used directly because it represents
  // the scalable part of the SVE vector and cannot be manipulated as a
  // subvector in the same way the lower 128bits can.
  def zsub_hi : SubRegIndex<128>;
  // Note: Code depends on these having consecutive numbers
  def dsub0 : SubRegIndex<64>;
  def dsub1 : SubRegIndex<64>;
  def dsub2 : SubRegIndex<64>;
  def dsub3 : SubRegIndex<64>;
  // Note: Code depends on these having consecutive numbers
  def qsub0 : SubRegIndex<128>;
  def qsub1 : SubRegIndex<128>;
  def qsub2 : SubRegIndex<128>;
  def qsub3 : SubRegIndex<128>;
  // Note: Code depends on these having consecutive numbers
  def zasubb  : SubRegIndex<2048>; // (16 x 16)/1 bytes  = 2048 bits
  def zasubh0 : SubRegIndex<1024>; // (16 x 16)/2 bytes  = 1024 bits
  def zasubh1 : SubRegIndex<1024>; // (16 x 16)/2 bytes  = 1024 bits
  def zasubs0 : SubRegIndex<512>;  // (16 x 16)/4 bytes  = 512 bits
  def zasubs1 : SubRegIndex<512>;  // (16 x 16)/4 bytes  = 512 bits
  def zasubd0 : SubRegIndex<256>;  // (16 x 16)/8 bytes  = 256 bits
  def zasubd1 : SubRegIndex<256>;  // (16 x 16)/8 bytes  = 256 bits
  def zasubq0 : SubRegIndex<128>;  // (16 x 16)/16 bytes = 128 bits
  def zasubq1 : SubRegIndex<128>;  // (16 x 16)/16 bytes = 128 bits
}

let Namespace = "AArch64" in {
  def vreg : RegAltNameIndex;
  def vlist1 : RegAltNameIndex;
}

//===----------------------------------------------------------------------===//
// Registers
//===----------------------------------------------------------------------===//
def W0    : AArch64Reg<0,   "w0" >, DwarfRegNum<[0]>;
def W1    : AArch64Reg<1,   "w1" >, DwarfRegNum<[1]>;
def W2    : AArch64Reg<2,   "w2" >, DwarfRegNum<[2]>;
def W3    : AArch64Reg<3,   "w3" >, DwarfRegNum<[3]>;
def W4    : AArch64Reg<4,   "w4" >, DwarfRegNum<[4]>;
def W5    : AArch64Reg<5,   "w5" >, DwarfRegNum<[5]>;
def W6    : AArch64Reg<6,   "w6" >, DwarfRegNum<[6]>;
def W7    : AArch64Reg<7,   "w7" >, DwarfRegNum<[7]>;
def W8    : AArch64Reg<8,   "w8" >, DwarfRegNum<[8]>;
def W9    : AArch64Reg<9,   "w9" >, DwarfRegNum<[9]>;
def W10   : AArch64Reg<10, "w10">, DwarfRegNum<[10]>;
def W11   : AArch64Reg<11, "w11">, DwarfRegNum<[11]>;
def W12   : AArch64Reg<12, "w12">, DwarfRegNum<[12]>;
def W13   : AArch64Reg<13, "w13">, DwarfRegNum<[13]>;
def W14   : AArch64Reg<14, "w14">, DwarfRegNum<[14]>;
def W15   : AArch64Reg<15, "w15">, DwarfRegNum<[15]>;
def W16   : AArch64Reg<16, "w16">, DwarfRegNum<[16]>;
def W17   : AArch64Reg<17, "w17">, DwarfRegNum<[17]>;
def W18   : AArch64Reg<18, "w18">, DwarfRegNum<[18]>;
def W19   : AArch64Reg<19, "w19">, DwarfRegNum<[19]>;
def W20   : AArch64Reg<20, "w20">, DwarfRegNum<[20]>;
def W21   : AArch64Reg<21, "w21">, DwarfRegNum<[21]>;
def W22   : AArch64Reg<22, "w22">, DwarfRegNum<[22]>;
def W23   : AArch64Reg<23, "w23">, DwarfRegNum<[23]>;
def W24   : AArch64Reg<24, "w24">, DwarfRegNum<[24]>;
def W25   : AArch64Reg<25, "w25">, DwarfRegNum<[25]>;
def W26   : AArch64Reg<26, "w26">, DwarfRegNum<[26]>;
def W27   : AArch64Reg<27, "w27">, DwarfRegNum<[27]>;
def W28   : AArch64Reg<28, "w28">, DwarfRegNum<[28]>;
def W29   : AArch64Reg<29, "w29">, DwarfRegNum<[29]>;
def W30   : AArch64Reg<30, "w30">, DwarfRegNum<[30]>;
def WSP   : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
let isConstant = true in
def WZR   : AArch64Reg<31, "wzr">, DwarfRegAlias<WSP>;

let SubRegIndices = [sub_32] in {
def X0    : AArch64Reg<0,   "x0",  [W0]>, DwarfRegAlias<W0>;
def X1    : AArch64Reg<1,   "x1",  [W1]>, DwarfRegAlias<W1>;
def X2    : AArch64Reg<2,   "x2",  [W2]>, DwarfRegAlias<W2>;
def X3    : AArch64Reg<3,   "x3",  [W3]>, DwarfRegAlias<W3>;
def X4    : AArch64Reg<4,   "x4",  [W4]>, DwarfRegAlias<W4>;
def X5    : AArch64Reg<5,   "x5",  [W5]>, DwarfRegAlias<W5>;
def X6    : AArch64Reg<6,   "x6",  [W6]>, DwarfRegAlias<W6>;
def X7    : AArch64Reg<7,   "x7",  [W7]>, DwarfRegAlias<W7>;
def X8    : AArch64Reg<8,   "x8",  [W8]>, DwarfRegAlias<W8>;
def X9    : AArch64Reg<9,   "x9",  [W9]>, DwarfRegAlias<W9>;
def X10   : AArch64Reg<10, "x10", [W10]>, DwarfRegAlias<W10>;
def X11   : AArch64Reg<11, "x11", [W11]>, DwarfRegAlias<W11>;
def X12   : AArch64Reg<12, "x12", [W12]>, DwarfRegAlias<W12>;
def X13   : AArch64Reg<13, "x13", [W13]>, DwarfRegAlias<W13>;
def X14   : AArch64Reg<14, "x14", [W14]>, DwarfRegAlias<W14>;
def X15   : AArch64Reg<15, "x15", [W15]>, DwarfRegAlias<W15>;
def X16   : AArch64Reg<16, "x16", [W16]>, DwarfRegAlias<W16>;
def X17   : AArch64Reg<17, "x17", [W17]>, DwarfRegAlias<W17>;
def X18   : AArch64Reg<18, "x18", [W18]>, DwarfRegAlias<W18>;
def X19   : AArch64Reg<19, "x19", [W19]>, DwarfRegAlias<W19>;
def X20   : AArch64Reg<20, "x20", [W20]>, DwarfRegAlias<W20>;
def X21   : AArch64Reg<21, "x21", [W21]>, DwarfRegAlias<W21>;
def X22   : AArch64Reg<22, "x22", [W22]>, DwarfRegAlias<W22>;
def X23   : AArch64Reg<23, "x23", [W23]>, DwarfRegAlias<W23>;
def X24   : AArch64Reg<24, "x24", [W24]>, DwarfRegAlias<W24>;
def X25   : AArch64Reg<25, "x25", [W25]>, DwarfRegAlias<W25>;
def X26   : AArch64Reg<26, "x26", [W26]>, DwarfRegAlias<W26>;
def X27   : AArch64Reg<27, "x27", [W27]>, DwarfRegAlias<W27>;
def X28   : AArch64Reg<28, "x28", [W28]>, DwarfRegAlias<W28>;
def FP    : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias<W29>;
def LR    : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias<W30>;
def SP    : AArch64Reg<31, "sp",  [WSP]>, DwarfRegAlias<WSP>;
let isConstant = true in
def XZR   : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias<WSP>;
}

// Condition code register.
def NZCV  : AArch64Reg<0, "nzcv">;

// First fault status register
def FFR : AArch64Reg<0, "ffr">, DwarfRegNum<[47]>;

// Purely virtual Vector Granule (VG) Dwarf register
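// Note: VG exists only for DWARF/CFI purposes; it nominally holds the current
// vector length in 64-bit granules so unwinders can describe scalable frames.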
def VG : AArch64Reg<0, "vg">, DwarfRegNum<[46]>;

// Floating-point control register
def FPCR : AArch64Reg<0, "fpcr">;

// GPR register classes with the intersections of GPR32/GPR32sp and
// GPR64/GPR64sp for use by the coalescer.
def GPR32common : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 30)> {
  let AltOrders = [(rotl GPR32common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64common : RegisterClass<"AArch64", [i64], 64,
                                (add (sequence "X%u", 0, 28), FP, LR)> {
  let AltOrders = [(rotl GPR64common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
// GPR register classes which exclude SP/WSP.
def GPR32 : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR)> {
  let AltOrders = [(rotl GPR32, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64 : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR)> {
  let AltOrders = [(rotl GPR64, 8)];
  let AltOrderSelect = [{ return 1; }];
}

// GPR register classes which include SP/WSP.
def GPR32sp : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WSP)> {
  let AltOrders = [(rotl GPR32sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64sp : RegisterClass<"AArch64", [i64], 64, (add GPR64common, SP)> {
  let AltOrders = [(rotl GPR64sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}

def GPR32sponly : RegisterClass<"AArch64", [i32], 32, (add WSP)>;
def GPR64sponly : RegisterClass<"AArch64", [i64], 64, (add SP)>;

def GPR64spPlus0Operand : AsmOperandClass {
  let Name = "GPR64sp0";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR64<AArch64::GPR64spRegClassID>";
  let ParserMethod = "tryParseGPR64sp0Operand";
}

def GPR64sp0 : RegisterOperand<GPR64sp> {
  let ParserMatchClass = GPR64spPlus0Operand;
}

// GPR32/GPR64 but with zero-register substitution enabled.
// TODO: Roll this out to GPR32/GPR64/GPR32all/GPR64all.
def GPR32z : RegisterOperand<GPR32> {
  let GIZeroRegister = WZR;
}
def GPR64z : RegisterOperand<GPR64> {
  let GIZeroRegister = XZR;
}

// GPR argument registers.
def GPR32arg : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 7)>;
def GPR64arg : RegisterClass<"AArch64", [i64], 64, (sequence "X%u", 0, 7)>;

// GPR register classes which include WZR/XZR AND SP/WSP. This is not a
// constraint used by any instructions; it is used as a common super-class.
def GPR32all : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR, WSP)>;
def GPR64all : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR, SP)>;

// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// This is for indirect tail calls to store the address of the destination.
def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X21,
                                                     X22, X23, X24, X25, X26,
                                                     X27, X28, FP, LR)>;

// Restricted set of tail call registers, for use when branch target
// enforcement is enabled. These are the only registers which can be used to
// indirectly branch (not call) to the "BTI c" instruction at the start of a
// BTI-protected function.
def rtcGPR64 : RegisterClass<"AArch64", [i64], 64, (add X16, X17)>;

// Register set that excludes registers that are reserved for procedure calls.
// This is used for pseudo-instructions that are actually implemented using a
// procedure call.
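// (X16/X17 are the intra-procedure-call scratch registers IP0/IP1, which call
// veneers may clobber, and LR is clobbered by the call itself.)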
def GPR64noip : RegisterClass<"AArch64", [i64], 64, (sub GPR64, X16, X17, LR)>;

// GPR register classes for the post-increment amount of vector load/store
// instructions. These operands have alternate printing when Rm=31: they print
// a constant immediate value equal to the total number of bytes transferred.

// FIXME: TableGen *should* be able to do these itself now. There appears to be
// a bug in counting how many operands a Post-indexed MCInst should have which
// means the aliases don't trigger.
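// For example, "ld1 { v0.16b }, [x0], x2" post-increments by x2, while the
// Rm=31 encoding of the same instruction is printed as
// "ld1 { v0.16b }, [x0], #16" and uses the GPR64pi16 operand below.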
def GPR64pi1  : RegisterOperand<GPR64, "printPostIncOperand<1>">;
def GPR64pi2  : RegisterOperand<GPR64, "printPostIncOperand<2>">;
def GPR64pi3  : RegisterOperand<GPR64, "printPostIncOperand<3>">;
def GPR64pi4  : RegisterOperand<GPR64, "printPostIncOperand<4>">;
def GPR64pi6  : RegisterOperand<GPR64, "printPostIncOperand<6>">;
def GPR64pi8  : RegisterOperand<GPR64, "printPostIncOperand<8>">;
def GPR64pi12 : RegisterOperand<GPR64, "printPostIncOperand<12>">;
def GPR64pi16 : RegisterOperand<GPR64, "printPostIncOperand<16>">;
def GPR64pi24 : RegisterOperand<GPR64, "printPostIncOperand<24>">;
def GPR64pi32 : RegisterOperand<GPR64, "printPostIncOperand<32>">;
def GPR64pi48 : RegisterOperand<GPR64, "printPostIncOperand<48>">;
def GPR64pi64 : RegisterOperand<GPR64, "printPostIncOperand<64>">;

// Condition code regclass.
def CCR : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
  let CopyCost = -1;  // Don't allow copying of status registers.

  // CCR is not allocatable.
  let isAllocatable = 0;
}

//===----------------------------------------------------------------------===//
// Floating Point Scalar Registers
//===----------------------------------------------------------------------===//

def B0    : AArch64Reg<0,   "b0">, DwarfRegNum<[64]>;
def B1    : AArch64Reg<1,   "b1">, DwarfRegNum<[65]>;
def B2    : AArch64Reg<2,   "b2">, DwarfRegNum<[66]>;
def B3    : AArch64Reg<3,   "b3">, DwarfRegNum<[67]>;
def B4    : AArch64Reg<4,   "b4">, DwarfRegNum<[68]>;
def B5    : AArch64Reg<5,   "b5">, DwarfRegNum<[69]>;
def B6    : AArch64Reg<6,   "b6">, DwarfRegNum<[70]>;
def B7    : AArch64Reg<7,   "b7">, DwarfRegNum<[71]>;
def B8    : AArch64Reg<8,   "b8">, DwarfRegNum<[72]>;
def B9    : AArch64Reg<9,   "b9">, DwarfRegNum<[73]>;
def B10   : AArch64Reg<10, "b10">, DwarfRegNum<[74]>;
def B11   : AArch64Reg<11, "b11">, DwarfRegNum<[75]>;
def B12   : AArch64Reg<12, "b12">, DwarfRegNum<[76]>;
def B13   : AArch64Reg<13, "b13">, DwarfRegNum<[77]>;
def B14   : AArch64Reg<14, "b14">, DwarfRegNum<[78]>;
def B15   : AArch64Reg<15, "b15">, DwarfRegNum<[79]>;
def B16   : AArch64Reg<16, "b16">, DwarfRegNum<[80]>;
def B17   : AArch64Reg<17, "b17">, DwarfRegNum<[81]>;
def B18   : AArch64Reg<18, "b18">, DwarfRegNum<[82]>;
def B19   : AArch64Reg<19, "b19">, DwarfRegNum<[83]>;
def B20   : AArch64Reg<20, "b20">, DwarfRegNum<[84]>;
def B21   : AArch64Reg<21, "b21">, DwarfRegNum<[85]>;
def B22   : AArch64Reg<22, "b22">, DwarfRegNum<[86]>;
def B23   : AArch64Reg<23, "b23">, DwarfRegNum<[87]>;
def B24   : AArch64Reg<24, "b24">, DwarfRegNum<[88]>;
def B25   : AArch64Reg<25, "b25">, DwarfRegNum<[89]>;
def B26   : AArch64Reg<26, "b26">, DwarfRegNum<[90]>;
def B27   : AArch64Reg<27, "b27">, DwarfRegNum<[91]>;
def B28   : AArch64Reg<28, "b28">, DwarfRegNum<[92]>;
def B29   : AArch64Reg<29, "b29">, DwarfRegNum<[93]>;
def B30   : AArch64Reg<30, "b30">, DwarfRegNum<[94]>;
def B31   : AArch64Reg<31, "b31">, DwarfRegNum<[95]>;

let SubRegIndices = [bsub] in {
def H0    : AArch64Reg<0,   "h0", [B0]>, DwarfRegAlias<B0>;
def H1    : AArch64Reg<1,   "h1", [B1]>, DwarfRegAlias<B1>;
def H2    : AArch64Reg<2,   "h2", [B2]>, DwarfRegAlias<B2>;
def H3    : AArch64Reg<3,   "h3", [B3]>, DwarfRegAlias<B3>;
def H4    : AArch64Reg<4,   "h4", [B4]>, DwarfRegAlias<B4>;
def H5    : AArch64Reg<5,   "h5", [B5]>, DwarfRegAlias<B5>;
def H6    : AArch64Reg<6,   "h6", [B6]>, DwarfRegAlias<B6>;
def H7    : AArch64Reg<7,   "h7", [B7]>, DwarfRegAlias<B7>;
def H8    : AArch64Reg<8,   "h8", [B8]>, DwarfRegAlias<B8>;
def H9    : AArch64Reg<9,   "h9", [B9]>, DwarfRegAlias<B9>;
def H10   : AArch64Reg<10, "h10", [B10]>, DwarfRegAlias<B10>;
def H11   : AArch64Reg<11, "h11", [B11]>, DwarfRegAlias<B11>;
def H12   : AArch64Reg<12, "h12", [B12]>, DwarfRegAlias<B12>;
def H13   : AArch64Reg<13, "h13", [B13]>, DwarfRegAlias<B13>;
def H14   : AArch64Reg<14, "h14", [B14]>, DwarfRegAlias<B14>;
def H15   : AArch64Reg<15, "h15", [B15]>, DwarfRegAlias<B15>;
def H16   : AArch64Reg<16, "h16", [B16]>, DwarfRegAlias<B16>;
def H17   : AArch64Reg<17, "h17", [B17]>, DwarfRegAlias<B17>;
def H18   : AArch64Reg<18, "h18", [B18]>, DwarfRegAlias<B18>;
def H19   : AArch64Reg<19, "h19", [B19]>, DwarfRegAlias<B19>;
def H20   : AArch64Reg<20, "h20", [B20]>, DwarfRegAlias<B20>;
def H21   : AArch64Reg<21, "h21", [B21]>, DwarfRegAlias<B21>;
def H22   : AArch64Reg<22, "h22", [B22]>, DwarfRegAlias<B22>;
def H23   : AArch64Reg<23, "h23", [B23]>, DwarfRegAlias<B23>;
def H24   : AArch64Reg<24, "h24", [B24]>, DwarfRegAlias<B24>;
def H25   : AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>;
def H26   : AArch64Reg<26, "h26", [B26]>, DwarfRegAlias<B26>;
def H27   : AArch64Reg<27, "h27", [B27]>, DwarfRegAlias<B27>;
def H28   : AArch64Reg<28, "h28", [B28]>, DwarfRegAlias<B28>;
def H29   : AArch64Reg<29, "h29", [B29]>, DwarfRegAlias<B29>;
def H30   : AArch64Reg<30, "h30", [B30]>, DwarfRegAlias<B30>;
def H31   : AArch64Reg<31, "h31", [B31]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [hsub] in {
def S0    : AArch64Reg<0,   "s0", [H0]>, DwarfRegAlias<B0>;
def S1    : AArch64Reg<1,   "s1", [H1]>, DwarfRegAlias<B1>;
def S2    : AArch64Reg<2,   "s2", [H2]>, DwarfRegAlias<B2>;
def S3    : AArch64Reg<3,   "s3", [H3]>, DwarfRegAlias<B3>;
def S4    : AArch64Reg<4,   "s4", [H4]>, DwarfRegAlias<B4>;
def S5    : AArch64Reg<5,   "s5", [H5]>, DwarfRegAlias<B5>;
def S6    : AArch64Reg<6,   "s6", [H6]>, DwarfRegAlias<B6>;
def S7    : AArch64Reg<7,   "s7", [H7]>, DwarfRegAlias<B7>;
def S8    : AArch64Reg<8,   "s8", [H8]>, DwarfRegAlias<B8>;
def S9    : AArch64Reg<9,   "s9", [H9]>, DwarfRegAlias<B9>;
def S10   : AArch64Reg<10, "s10", [H10]>, DwarfRegAlias<B10>;
def S11   : AArch64Reg<11, "s11", [H11]>, DwarfRegAlias<B11>;
def S12   : AArch64Reg<12, "s12", [H12]>, DwarfRegAlias<B12>;
def S13   : AArch64Reg<13, "s13", [H13]>, DwarfRegAlias<B13>;
def S14   : AArch64Reg<14, "s14", [H14]>, DwarfRegAlias<B14>;
def S15   : AArch64Reg<15, "s15", [H15]>, DwarfRegAlias<B15>;
def S16   : AArch64Reg<16, "s16", [H16]>, DwarfRegAlias<B16>;
def S17   : AArch64Reg<17, "s17", [H17]>, DwarfRegAlias<B17>;
def S18   : AArch64Reg<18, "s18", [H18]>, DwarfRegAlias<B18>;
def S19   : AArch64Reg<19, "s19", [H19]>, DwarfRegAlias<B19>;
def S20   : AArch64Reg<20, "s20", [H20]>, DwarfRegAlias<B20>;
def S21   : AArch64Reg<21, "s21", [H21]>, DwarfRegAlias<B21>;
def S22   : AArch64Reg<22, "s22", [H22]>, DwarfRegAlias<B22>;
def S23   : AArch64Reg<23, "s23", [H23]>, DwarfRegAlias<B23>;
def S24   : AArch64Reg<24, "s24", [H24]>, DwarfRegAlias<B24>;
def S25   : AArch64Reg<25, "s25", [H25]>, DwarfRegAlias<B25>;
def S26   : AArch64Reg<26, "s26", [H26]>, DwarfRegAlias<B26>;
def S27   : AArch64Reg<27, "s27", [H27]>, DwarfRegAlias<B27>;
def S28   : AArch64Reg<28, "s28", [H28]>, DwarfRegAlias<B28>;
def S29   : AArch64Reg<29, "s29", [H29]>, DwarfRegAlias<B29>;
def S30   : AArch64Reg<30, "s30", [H30]>, DwarfRegAlias<B30>;
def S31   : AArch64Reg<31, "s31", [H31]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [ssub], RegAltNameIndices = [vreg, vlist1] in {
def D0    : AArch64Reg<0,   "d0", [S0], ["v0", ""]>, DwarfRegAlias<B0>;
def D1    : AArch64Reg<1,   "d1", [S1], ["v1", ""]>, DwarfRegAlias<B1>;
def D2    : AArch64Reg<2,   "d2", [S2], ["v2", ""]>, DwarfRegAlias<B2>;
def D3    : AArch64Reg<3,   "d3", [S3], ["v3", ""]>, DwarfRegAlias<B3>;
def D4    : AArch64Reg<4,   "d4", [S4], ["v4", ""]>, DwarfRegAlias<B4>;
def D5    : AArch64Reg<5,   "d5", [S5], ["v5", ""]>, DwarfRegAlias<B5>;
def D6    : AArch64Reg<6,   "d6", [S6], ["v6", ""]>, DwarfRegAlias<B6>;
def D7    : AArch64Reg<7,   "d7", [S7], ["v7", ""]>, DwarfRegAlias<B7>;
def D8    : AArch64Reg<8,   "d8", [S8], ["v8", ""]>, DwarfRegAlias<B8>;
def D9    : AArch64Reg<9,   "d9", [S9], ["v9", ""]>, DwarfRegAlias<B9>;
def D10   : AArch64Reg<10, "d10", [S10], ["v10", ""]>, DwarfRegAlias<B10>;
def D11   : AArch64Reg<11, "d11", [S11], ["v11", ""]>, DwarfRegAlias<B11>;
def D12   : AArch64Reg<12, "d12", [S12], ["v12", ""]>, DwarfRegAlias<B12>;
def D13   : AArch64Reg<13, "d13", [S13], ["v13", ""]>, DwarfRegAlias<B13>;
def D14   : AArch64Reg<14, "d14", [S14], ["v14", ""]>, DwarfRegAlias<B14>;
def D15   : AArch64Reg<15, "d15", [S15], ["v15", ""]>, DwarfRegAlias<B15>;
def D16   : AArch64Reg<16, "d16", [S16], ["v16", ""]>, DwarfRegAlias<B16>;
def D17   : AArch64Reg<17, "d17", [S17], ["v17", ""]>, DwarfRegAlias<B17>;
def D18   : AArch64Reg<18, "d18", [S18], ["v18", ""]>, DwarfRegAlias<B18>;
def D19   : AArch64Reg<19, "d19", [S19], ["v19", ""]>, DwarfRegAlias<B19>;
def D20   : AArch64Reg<20, "d20", [S20], ["v20", ""]>, DwarfRegAlias<B20>;
def D21   : AArch64Reg<21, "d21", [S21], ["v21", ""]>, DwarfRegAlias<B21>;
def D22   : AArch64Reg<22, "d22", [S22], ["v22", ""]>, DwarfRegAlias<B22>;
def D23   : AArch64Reg<23, "d23", [S23], ["v23", ""]>, DwarfRegAlias<B23>;
def D24   : AArch64Reg<24, "d24", [S24], ["v24", ""]>, DwarfRegAlias<B24>;
def D25   : AArch64Reg<25, "d25", [S25], ["v25", ""]>, DwarfRegAlias<B25>;
def D26   : AArch64Reg<26, "d26", [S26], ["v26", ""]>, DwarfRegAlias<B26>;
def D27   : AArch64Reg<27, "d27", [S27], ["v27", ""]>, DwarfRegAlias<B27>;
def D28   : AArch64Reg<28, "d28", [S28], ["v28", ""]>, DwarfRegAlias<B28>;
def D29   : AArch64Reg<29, "d29", [S29], ["v29", ""]>, DwarfRegAlias<B29>;
def D30   : AArch64Reg<30, "d30", [S30], ["v30", ""]>, DwarfRegAlias<B30>;
def D31   : AArch64Reg<31, "d31", [S31], ["v31", ""]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [dsub], RegAltNameIndices = [vreg, vlist1] in {
def Q0    : AArch64Reg<0,   "q0", [D0], ["v0", ""]>, DwarfRegAlias<B0>;
def Q1    : AArch64Reg<1,   "q1", [D1], ["v1", ""]>, DwarfRegAlias<B1>;
def Q2    : AArch64Reg<2,   "q2", [D2], ["v2", ""]>, DwarfRegAlias<B2>;
def Q3    : AArch64Reg<3,   "q3", [D3], ["v3", ""]>, DwarfRegAlias<B3>;
def Q4    : AArch64Reg<4,   "q4", [D4], ["v4", ""]>, DwarfRegAlias<B4>;
def Q5    : AArch64Reg<5,   "q5", [D5], ["v5", ""]>, DwarfRegAlias<B5>;
def Q6    : AArch64Reg<6,   "q6", [D6], ["v6", ""]>, DwarfRegAlias<B6>;
def Q7    : AArch64Reg<7,   "q7", [D7], ["v7", ""]>, DwarfRegAlias<B7>;
def Q8    : AArch64Reg<8,   "q8", [D8], ["v8", ""]>, DwarfRegAlias<B8>;
def Q9    : AArch64Reg<9,   "q9", [D9], ["v9", ""]>, DwarfRegAlias<B9>;
def Q10   : AArch64Reg<10, "q10", [D10], ["v10", ""]>, DwarfRegAlias<B10>;
def Q11   : AArch64Reg<11, "q11", [D11], ["v11", ""]>, DwarfRegAlias<B11>;
def Q12   : AArch64Reg<12, "q12", [D12], ["v12", ""]>, DwarfRegAlias<B12>;
def Q13   : AArch64Reg<13, "q13", [D13], ["v13", ""]>, DwarfRegAlias<B13>;
def Q14   : AArch64Reg<14, "q14", [D14], ["v14", ""]>, DwarfRegAlias<B14>;
def Q15   : AArch64Reg<15, "q15", [D15], ["v15", ""]>, DwarfRegAlias<B15>;
def Q16   : AArch64Reg<16, "q16", [D16], ["v16", ""]>, DwarfRegAlias<B16>;
def Q17   : AArch64Reg<17, "q17", [D17], ["v17", ""]>, DwarfRegAlias<B17>;
def Q18   : AArch64Reg<18, "q18", [D18], ["v18", ""]>, DwarfRegAlias<B18>;
def Q19   : AArch64Reg<19, "q19", [D19], ["v19", ""]>, DwarfRegAlias<B19>;
def Q20   : AArch64Reg<20, "q20", [D20], ["v20", ""]>, DwarfRegAlias<B20>;
def Q21   : AArch64Reg<21, "q21", [D21], ["v21", ""]>, DwarfRegAlias<B21>;
def Q22   : AArch64Reg<22, "q22", [D22], ["v22", ""]>, DwarfRegAlias<B22>;
def Q23   : AArch64Reg<23, "q23", [D23], ["v23", ""]>, DwarfRegAlias<B23>;
def Q24   : AArch64Reg<24, "q24", [D24], ["v24", ""]>, DwarfRegAlias<B24>;
def Q25   : AArch64Reg<25, "q25", [D25], ["v25", ""]>, DwarfRegAlias<B25>;
def Q26   : AArch64Reg<26, "q26", [D26], ["v26", ""]>, DwarfRegAlias<B26>;
def Q27   : AArch64Reg<27, "q27", [D27], ["v27", ""]>, DwarfRegAlias<B27>;
def Q28   : AArch64Reg<28, "q28", [D28], ["v28", ""]>, DwarfRegAlias<B28>;
def Q29   : AArch64Reg<29, "q29", [D29], ["v29", ""]>, DwarfRegAlias<B29>;
def Q30   : AArch64Reg<30, "q30", [D30], ["v30", ""]>, DwarfRegAlias<B30>;
def Q31   : AArch64Reg<31, "q31", [D31], ["v31", ""]>, DwarfRegAlias<B31>;
}

def FPR8  : RegisterClass<"AArch64", [i8], 8, (sequence "B%u", 0, 31)> {
  let Size = 8;
}
def FPR16 : RegisterClass<"AArch64", [f16, bf16], 16, (sequence "H%u", 0, 31)> {
  let Size = 16;
}

def FPR16_lo : RegisterClass<"AArch64", [f16], 16, (trunc FPR16, 16)> {
  let Size = 16;
}
def FPR32 : RegisterClass<"AArch64", [f32, i32], 32,(sequence "S%u", 0, 31)>;
def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
                                      v1i64, v4f16, v4bf16],
                                     64, (sequence "D%u", 0, 31)>;
def FPR64_lo : RegisterClass<"AArch64",
                             [v8i8, v4i16, v2i32, v1i64, v4f16, v4bf16, v2f32,
                              v1f64],
                             64, (trunc FPR64, 16)>;

// We don't (yet) have an f128 legal type, so don't use that here. We
// normalize 128-bit vectors to v2f64 for arg passing and such, so use
// that here.
def FPR128 : RegisterClass<"AArch64",
                           [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128,
                            v8f16, v8bf16],
                           128, (sequence "Q%u", 0, 31)>;

// The lower 16 vector registers.  Some instructions can only take registers
// in this range.
def FPR128_lo : RegisterClass<"AArch64",
                              [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16,
                               v8bf16],
                              128, (trunc FPR128, 16)>;

// Pairs, triples, and quads of 64-bit vector registers.
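// These untyped tuple classes model the consecutive-register lists consumed
// by the NEON structure load/store instructions (e.g. ld1/st1 with multiple
// registers, ld2/ld3/ld4).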
def DSeqPairs : RegisterTuples<[dsub0, dsub1], [(rotl FPR64, 0), (rotl FPR64, 1)]>;
def DSeqTriples : RegisterTuples<[dsub0, dsub1, dsub2],
                                 [(rotl FPR64, 0), (rotl FPR64, 1),
                                  (rotl FPR64, 2)]>;
def DSeqQuads : RegisterTuples<[dsub0, dsub1, dsub2, dsub3],
                               [(rotl FPR64, 0), (rotl FPR64, 1),
                                (rotl FPR64, 2), (rotl FPR64, 3)]>;
def DD   : RegisterClass<"AArch64", [untyped], 64, (add DSeqPairs)> {
  let Size = 128;
}
def DDD  : RegisterClass<"AArch64", [untyped], 64, (add DSeqTriples)> {
  let Size = 192;
}
def DDDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqQuads)> {
  let Size = 256;
}

// Pairs, triples, and quads of 128-bit vector registers.
def QSeqPairs : RegisterTuples<[qsub0, qsub1], [(rotl FPR128, 0), (rotl FPR128, 1)]>;
def QSeqTriples : RegisterTuples<[qsub0, qsub1, qsub2],
                                 [(rotl FPR128, 0), (rotl FPR128, 1),
                                  (rotl FPR128, 2)]>;
def QSeqQuads : RegisterTuples<[qsub0, qsub1, qsub2, qsub3],
                               [(rotl FPR128, 0), (rotl FPR128, 1),
                                (rotl FPR128, 2), (rotl FPR128, 3)]>;
def QQ   : RegisterClass<"AArch64", [untyped], 128, (add QSeqPairs)> {
  let Size = 256;
}
def QQQ  : RegisterClass<"AArch64", [untyped], 128, (add QSeqTriples)> {
  let Size = 384;
}
def QQQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqQuads)> {
  let Size = 512;
}


// Vector operand versions of the FP registers. Alternate name printing and
// assembler matching.
def VectorReg64AsmOperand : AsmOperandClass {
  let Name = "VectorReg64";
  let PredicateMethod = "isNeonVectorReg";
}
def VectorReg128AsmOperand : AsmOperandClass {
  let Name = "VectorReg128";
  let PredicateMethod = "isNeonVectorReg";
}

def V64  : RegisterOperand<FPR64, "printVRegOperand"> {
  let ParserMatchClass = VectorReg64AsmOperand;
}

def V128 : RegisterOperand<FPR128, "printVRegOperand"> {
  let ParserMatchClass = VectorReg128AsmOperand;
}

def VectorRegLoAsmOperand : AsmOperandClass {
  let Name = "VectorRegLo";
  let PredicateMethod = "isNeonVectorRegLo";
}
def V64_lo : RegisterOperand<FPR64_lo, "printVRegOperand"> {
  let ParserMatchClass = VectorRegLoAsmOperand;
}
def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
  let ParserMatchClass = VectorRegLoAsmOperand;
}

class TypedVecListAsmOperand<int count, string vecty, int lanes, int eltsize>
    : AsmOperandClass {
  let Name = "TypedVectorList" # count # "_" # lanes # eltsize;

  let PredicateMethod
      = "isTypedVectorList<RegKind::NeonVector, " # count # ", " # lanes # ", " # eltsize # ">";
  let RenderMethod = "addVectorListOperands<" # vecty  # ", "  # count # ">";
}

class TypedVecListRegOperand<RegisterClass Reg, int lanes, string eltsize>
    : RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
                                                   # eltsize # "'>">;

multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
  // With implicit types (probably on instruction instead). E.g. { v0, v1 }
  def _64AsmOperand : AsmOperandClass {
    let Name = NAME # "64";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_DReg, " # count # ">";
  }

  def "64" : RegisterOperand<Reg64, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_64AsmOperand");
  }

  def _128AsmOperand : AsmOperandClass {
    let Name = NAME # "128";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_QReg, " # count # ">";
  }

  def "128" : RegisterOperand<Reg128, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_128AsmOperand");
  }

  // 64-bit register lists with explicit type.

  // { v0.8b, v1.8b }
  def _8bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 8, 8>;
  def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
  }

  // { v0.4h, v1.4h }
  def _4hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 4, 16>;
  def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
  }

  // { v0.2s, v1.2s }
  def _2sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 2, 32>;
  def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
  }

  // { v0.1d, v1.1d }
  def _1dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 1, 64>;
  def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
  }

  // 128-bit register lists with explicit type

  // { v0.16b, v1.16b }
  def _16bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 16, 8>;
  def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
  }

  // { v0.8h, v1.8h }
  def _8hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 8, 16>;
  def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
  }

  // { v0.4s, v1.4s }
  def _4sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 4, 32>;
  def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
  }

  // { v0.2d, v1.2d }
  def _2dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 2, 64>;
  def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
  }

  // { v0.b, v1.b }
  def _bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 8>;
  def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
  }

  // { v0.h, v1.h }
  def _hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 16>;
  def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
  }

  // { v0.s, v1.s }
  def _sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 32>;
  def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
  }

  // { v0.d, v1.d }
  def _dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 64>;
  def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
  }


}

defm VecListOne   : VectorList<1, FPR64, FPR128>;
defm VecListTwo   : VectorList<2, DD,    QQ>;
defm VecListThree : VectorList<3, DDD,   QQQ>;
defm VecListFour  : VectorList<4, DDDD,  QQQQ>;

class FPRAsmOperand<string RC> : AsmOperandClass {
  let Name = "FPRAsmOperand" # RC;
  let PredicateMethod = "isGPR64<AArch64::" # RC # "RegClassID>";
  let RenderMethod = "addRegOperands";
}

// Register operand versions of the scalar FP registers.
def FPR8Op  : RegisterOperand<FPR8, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR8">;
}

def FPR16Op  : RegisterOperand<FPR16, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR16">;
}

def FPR16Op_lo  : RegisterOperand<FPR16_lo, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR16_lo">;
}

def FPR32Op  : RegisterOperand<FPR32, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR32">;
}

def FPR64Op  : RegisterOperand<FPR64, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR64">;
}

def FPR128Op : RegisterOperand<FPR128, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR128">;
}

//===----------------------------------------------------------------------===//
// ARMv8.1a atomic CASP register operands
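//
// CASP requires an even/odd register pair: the first register of each pair
// must be even-numbered and the second is implicitly the next register,
// e.g. "casp x0, x1, x2, x3, [x4]".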


def WSeqPairs : RegisterTuples<[sube32, subo32],
                               [(decimate (rotl GPR32, 0), 2),
                                (decimate (rotl GPR32, 1), 2)]>;
def XSeqPairs : RegisterTuples<[sube64, subo64],
                               [(decimate (rotl GPR64, 0), 2),
                                (decimate (rotl GPR64, 1), 2)]>;

def WSeqPairsClass   : RegisterClass<"AArch64", [untyped], 32,
                                     (add WSeqPairs)>{
  let Size = 64;
}
def XSeqPairsClass   : RegisterClass<"AArch64", [untyped], 64,
                                     (add XSeqPairs)>{
  let Size = 128;
}


let RenderMethod = "addRegOperands", ParserMethod="tryParseGPRSeqPair" in {
  def WSeqPairsAsmOperandClass : AsmOperandClass { let Name = "WSeqPair"; }
  def XSeqPairsAsmOperandClass : AsmOperandClass { let Name = "XSeqPair"; }
}

def WSeqPairClassOperand :
    RegisterOperand<WSeqPairsClass, "printGPRSeqPairsClassOperand<32>"> {
  let ParserMatchClass = WSeqPairsAsmOperandClass;
}
def XSeqPairClassOperand :
    RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
  let ParserMatchClass = XSeqPairsAsmOperandClass;
}
// Reuse the parsing and register numbers from XSeqPairs, but the encoding is
// different.
def MrrsMssrPairClassOperand :
    RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
  let ParserMatchClass = XSeqPairsAsmOperandClass;
}
def SyspXzrPairOperandMatcherClass : AsmOperandClass {
  let Name = "SyspXzrPair";
  let RenderMethod = "addSyspXzrPairOperand";
  let ParserMethod = "tryParseSyspXzrPair";
}
def SyspXzrPairOperand :
    RegisterOperand<GPR64, "printSyspXzrPair"> { // needed to allow alias with XZR operand
  let ParserMatchClass = SyspXzrPairOperandMatcherClass;
}



//===----- END: v8.1a atomic CASP register operands -----------------------===//

//===----------------------------------------------------------------------===//
// Armv8.7a accelerator extension register operands: 8 consecutive GPRs
// starting with an even one
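//
// These back the Armv8.7-A LD64B/ST64B family (FEAT_LS64), which transfers 64
// bytes to or from registers X<t> through X<t+7> for an even <t>.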

let Namespace = "AArch64" in {
  foreach i = 0-7 in
    def "x8sub_"#i : SubRegIndex<64, !mul(64, i)>;
}

def Tuples8X : RegisterTuples<
  !foreach(i, [0,1,2,3,4,5,6,7], !cast<SubRegIndex>("x8sub_"#i)),
  !foreach(i, [0,1,2,3,4,5,6,7], (trunc (decimate (rotl GPR64, i), 2), 12))>;

def GPR64x8Class : RegisterClass<"AArch64", [i64x8], 512, (trunc Tuples8X, 12)> {
  let Size = 512;
}
def GPR64x8AsmOp : AsmOperandClass {
  let Name = "GPR64x8";
  let ParserMethod = "tryParseGPR64x8";
  let RenderMethod = "addRegOperands";
}
def GPR64x8 : RegisterOperand<GPR64x8Class, "printGPR64x8"> {
  let ParserMatchClass = GPR64x8AsmOp;
  let PrintMethod = "printGPR64x8";
}

//===----- END: v8.7a accelerator extension register operands -------------===//

// SVE predicate registers
def P0    : AArch64Reg<0,   "p0">, DwarfRegNum<[48]>;
def P1    : AArch64Reg<1,   "p1">, DwarfRegNum<[49]>;
def P2    : AArch64Reg<2,   "p2">, DwarfRegNum<[50]>;
def P3    : AArch64Reg<3,   "p3">, DwarfRegNum<[51]>;
def P4    : AArch64Reg<4,   "p4">, DwarfRegNum<[52]>;
def P5    : AArch64Reg<5,   "p5">, DwarfRegNum<[53]>;
def P6    : AArch64Reg<6,   "p6">, DwarfRegNum<[54]>;
def P7    : AArch64Reg<7,   "p7">, DwarfRegNum<[55]>;
def P8    : AArch64Reg<8,   "p8">, DwarfRegNum<[56]>;
def P9    : AArch64Reg<9,   "p9">, DwarfRegNum<[57]>;
def P10   : AArch64Reg<10, "p10">, DwarfRegNum<[58]>;
def P11   : AArch64Reg<11, "p11">, DwarfRegNum<[59]>;
def P12   : AArch64Reg<12, "p12">, DwarfRegNum<[60]>;
def P13   : AArch64Reg<13, "p13">, DwarfRegNum<[61]>;
def P14   : AArch64Reg<14, "p14">, DwarfRegNum<[62]>;
def P15   : AArch64Reg<15, "p15">, DwarfRegNum<[63]>;

// The parts of the SVE registers that don't overlap the Neon registers.
// These are only used as part of clobber lists.
def Z0_HI    : AArch64Reg<0,   "z0_hi">;
def Z1_HI    : AArch64Reg<1,   "z1_hi">;
def Z2_HI    : AArch64Reg<2,   "z2_hi">;
def Z3_HI    : AArch64Reg<3,   "z3_hi">;
def Z4_HI    : AArch64Reg<4,   "z4_hi">;
def Z5_HI    : AArch64Reg<5,   "z5_hi">;
def Z6_HI    : AArch64Reg<6,   "z6_hi">;
def Z7_HI    : AArch64Reg<7,   "z7_hi">;
def Z8_HI    : AArch64Reg<8,   "z8_hi">;
def Z9_HI    : AArch64Reg<9,   "z9_hi">;
def Z10_HI   : AArch64Reg<10, "z10_hi">;
def Z11_HI   : AArch64Reg<11, "z11_hi">;
def Z12_HI   : AArch64Reg<12, "z12_hi">;
def Z13_HI   : AArch64Reg<13, "z13_hi">;
def Z14_HI   : AArch64Reg<14, "z14_hi">;
def Z15_HI   : AArch64Reg<15, "z15_hi">;
def Z16_HI   : AArch64Reg<16, "z16_hi">;
def Z17_HI   : AArch64Reg<17, "z17_hi">;
def Z18_HI   : AArch64Reg<18, "z18_hi">;
def Z19_HI   : AArch64Reg<19, "z19_hi">;
def Z20_HI   : AArch64Reg<20, "z20_hi">;
def Z21_HI   : AArch64Reg<21, "z21_hi">;
def Z22_HI   : AArch64Reg<22, "z22_hi">;
def Z23_HI   : AArch64Reg<23, "z23_hi">;
def Z24_HI   : AArch64Reg<24, "z24_hi">;
def Z25_HI   : AArch64Reg<25, "z25_hi">;
def Z26_HI   : AArch64Reg<26, "z26_hi">;
def Z27_HI   : AArch64Reg<27, "z27_hi">;
def Z28_HI   : AArch64Reg<28, "z28_hi">;
def Z29_HI   : AArch64Reg<29, "z29_hi">;
def Z30_HI   : AArch64Reg<30, "z30_hi">;
def Z31_HI   : AArch64Reg<31, "z31_hi">;

// SVE variable-size vector registers
let SubRegIndices = [zsub,zsub_hi] in {
def Z0    : AArch64Reg<0,   "z0",  [Q0,  Z0_HI]>, DwarfRegNum<[96]>;
def Z1    : AArch64Reg<1,   "z1",  [Q1,  Z1_HI]>, DwarfRegNum<[97]>;
def Z2    : AArch64Reg<2,   "z2",  [Q2,  Z2_HI]>, DwarfRegNum<[98]>;
def Z3    : AArch64Reg<3,   "z3",  [Q3,  Z3_HI]>, DwarfRegNum<[99]>;
def Z4    : AArch64Reg<4,   "z4",  [Q4,  Z4_HI]>, DwarfRegNum<[100]>;
def Z5    : AArch64Reg<5,   "z5",  [Q5,  Z5_HI]>, DwarfRegNum<[101]>;
def Z6    : AArch64Reg<6,   "z6",  [Q6,  Z6_HI]>, DwarfRegNum<[102]>;
def Z7    : AArch64Reg<7,   "z7",  [Q7,  Z7_HI]>, DwarfRegNum<[103]>;
def Z8    : AArch64Reg<8,   "z8",  [Q8,  Z8_HI]>, DwarfRegNum<[104]>;
def Z9    : AArch64Reg<9,   "z9",  [Q9,  Z9_HI]>, DwarfRegNum<[105]>;
def Z10   : AArch64Reg<10, "z10", [Q10, Z10_HI]>, DwarfRegNum<[106]>;
def Z11   : AArch64Reg<11, "z11", [Q11, Z11_HI]>, DwarfRegNum<[107]>;
def Z12   : AArch64Reg<12, "z12", [Q12, Z12_HI]>, DwarfRegNum<[108]>;
def Z13   : AArch64Reg<13, "z13", [Q13, Z13_HI]>, DwarfRegNum<[109]>;
def Z14   : AArch64Reg<14, "z14", [Q14, Z14_HI]>, DwarfRegNum<[110]>;
def Z15   : AArch64Reg<15, "z15", [Q15, Z15_HI]>, DwarfRegNum<[111]>;
def Z16   : AArch64Reg<16, "z16", [Q16, Z16_HI]>, DwarfRegNum<[112]>;
def Z17   : AArch64Reg<17, "z17", [Q17, Z17_HI]>, DwarfRegNum<[113]>;
def Z18   : AArch64Reg<18, "z18", [Q18, Z18_HI]>, DwarfRegNum<[114]>;
def Z19   : AArch64Reg<19, "z19", [Q19, Z19_HI]>, DwarfRegNum<[115]>;
def Z20   : AArch64Reg<20, "z20", [Q20, Z20_HI]>, DwarfRegNum<[116]>;
def Z21   : AArch64Reg<21, "z21", [Q21, Z21_HI]>, DwarfRegNum<[117]>;
def Z22   : AArch64Reg<22, "z22", [Q22, Z22_HI]>, DwarfRegNum<[118]>;
def Z23   : AArch64Reg<23, "z23", [Q23, Z23_HI]>, DwarfRegNum<[119]>;
def Z24   : AArch64Reg<24, "z24", [Q24, Z24_HI]>, DwarfRegNum<[120]>;
def Z25   : AArch64Reg<25, "z25", [Q25, Z25_HI]>, DwarfRegNum<[121]>;
def Z26   : AArch64Reg<26, "z26", [Q26, Z26_HI]>, DwarfRegNum<[122]>;
def Z27   : AArch64Reg<27, "z27", [Q27, Z27_HI]>, DwarfRegNum<[123]>;
def Z28   : AArch64Reg<28, "z28", [Q28, Z28_HI]>, DwarfRegNum<[124]>;
def Z29   : AArch64Reg<29, "z29", [Q29, Z29_HI]>, DwarfRegNum<[125]>;
def Z30   : AArch64Reg<30, "z30", [Q30, Z30_HI]>, DwarfRegNum<[126]>;
def Z31   : AArch64Reg<31, "z31", [Q31, Z31_HI]>, DwarfRegNum<[127]>;
}

// Enum describing the element size for destructive
// operations.
class ElementSizeEnum<bits<3> val> {
  bits<3> Value = val;
}

def ElementSizeNone : ElementSizeEnum<0>;
def ElementSizeB    : ElementSizeEnum<1>;
def ElementSizeH    : ElementSizeEnum<2>;
def ElementSizeS    : ElementSizeEnum<3>;
def ElementSizeD    : ElementSizeEnum<4>;
def ElementSizeQ    : ElementSizeEnum<5>;  // Unused

class SVERegOp <string Suffix, AsmOperandClass C,
                ElementSizeEnum Size,
                RegisterClass RC> : RegisterOperand<RC> {
  ElementSizeEnum ElementSize;

  let ElementSize = Size;
  let PrintMethod = !if(!eq(Suffix, ""),
                        "printSVERegOp<>",
                        "printSVERegOp<'" # Suffix # "'>");
  let ParserMatchClass = C;
}

class PPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}
class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}

//******************************************************************************

// SVE predicate register classes.
class PPRClass<int firstreg, int lastreg> : RegisterClass<
                                  "AArch64",
                                  [ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1, aarch64svcount ], 16,
                                  (sequence "P%u", firstreg, lastreg)> {
  let Size = 16;
}

def PPR    : PPRClass<0, 15>;
def PPR_3b : PPRClass<0, 7>; // Restricted 3 bit SVE predicate register class.
def PPR_p8to15 : PPRClass<8, 15>;

class PPRAsmOperand <string name, string RegClass, int Width>: AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEPredicateVectorRegOfWidth<"
                            # Width # ", " # "AArch64::" # RegClass # "RegClassID>";
  let DiagnosticType = "InvalidSVE" # name # "Reg";
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEPredicateVector<RegKind::SVEPredicateVector>";
}

def PPRAsmOpAny : PPRAsmOperand<"PredicateAny", "PPR",  0>;
def PPRAsmOp8   : PPRAsmOperand<"PredicateB",   "PPR",  8>;
def PPRAsmOp16  : PPRAsmOperand<"PredicateH",   "PPR", 16>;
def PPRAsmOp32  : PPRAsmOperand<"PredicateS",   "PPR", 32>;
def PPRAsmOp64  : PPRAsmOperand<"PredicateD",   "PPR", 64>;

def PPRAny : PPRRegOp<"",  PPRAsmOpAny, ElementSizeNone, PPR>;
def PPR8   : PPRRegOp<"b", PPRAsmOp8,   ElementSizeB,  PPR>;
def PPR16  : PPRRegOp<"h", PPRAsmOp16,  ElementSizeH,  PPR>;
def PPR32  : PPRRegOp<"s", PPRAsmOp32,  ElementSizeS,  PPR>;
def PPR64  : PPRRegOp<"d", PPRAsmOp64,  ElementSizeD,  PPR>;

def PPRAsmOp3bAny : PPRAsmOperand<"Predicate3bAny", "PPR_3b",  0>;

def PPR3bAny : PPRRegOp<"",  PPRAsmOp3bAny, ElementSizeNone, PPR_3b>;


// SVE predicate-as-counter operand
class PNRAsmOperand<string name, string RegClass, int Width>
    : PPRAsmOperand<name, RegClass, Width> {
  let PredicateMethod = "isSVEPredicateAsCounterRegOfWidth<"
                            # Width # ", " # "AArch64::"
                            # RegClass # "RegClassID>";
  let DiagnosticType = "InvalidSVE" # name # "Reg";
  let ParserMethod   = "tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>";
}

class PNRRegOp<string Suffix, AsmOperandClass C, int EltSize, RegisterClass RC>
    : PPRRegOp<Suffix, C, ElementSizeNone, RC> {
  let PrintMethod = "printPredicateAsCounter<" # EltSize # ">";
}

def PNRAsmOpAny: PNRAsmOperand<"PNPredicateAny", "PPR", 0>;
def PNRAsmOp8  : PNRAsmOperand<"PNPredicateB", "PPR", 8>;
def PNRAsmOp16 : PNRAsmOperand<"PNPredicateH", "PPR", 16>;
def PNRAsmOp32 : PNRAsmOperand<"PNPredicateS", "PPR", 32>;
def PNRAsmOp64 : PNRAsmOperand<"PNPredicateD", "PPR", 64>;

def PNRAny : PNRRegOp<"", PNRAsmOpAny, 0,  PPR>;
def PNR8   : PNRRegOp<"b", PNRAsmOp8,  8,  PPR>;
def PNR16  : PNRRegOp<"h", PNRAsmOp16, 16, PPR>;
def PNR32  : PNRRegOp<"s", PNRAsmOp32, 32, PPR>;
def PNR64  : PNRRegOp<"d", PNRAsmOp64, 64, PPR>;

class PNRP8to15RegOp<string Suffix, AsmOperandClass C, int EltSize, RegisterClass RC>
    : PPRRegOp<Suffix, C, ElementSizeNone, RC> {
  let PrintMethod   = "printPredicateAsCounter<" # EltSize # ">";
  let EncoderMethod = "EncodePPR_p8to15";
  let DecoderMethod = "DecodePPR_p8to15RegisterClass";
}

def PNRAsmAny_p8to15  : PNRAsmOperand<"PNPredicateAny_p8to15", "PPR_p8to15", 0>;
def PNRAsmOp8_p8to15  : PNRAsmOperand<"PNPredicateB_p8to15",   "PPR_p8to15", 8>;
def PNRAsmOp16_p8to15 : PNRAsmOperand<"PNPredicateH_p8to15",   "PPR_p8to15", 16>;
def PNRAsmOp32_p8to15 : PNRAsmOperand<"PNPredicateS_p8to15",   "PPR_p8to15", 32>;
def PNRAsmOp64_p8to15 : PNRAsmOperand<"PNPredicateD_p8to15",   "PPR_p8to15", 64>;

def PNRAny_p8to15 : PNRP8to15RegOp<"",  PNRAsmAny_p8to15,  0,  PPR_p8to15>;
def PNR8_p8to15   : PNRP8to15RegOp<"b", PNRAsmOp8_p8to15,  8,  PPR_p8to15>;
def PNR16_p8to15  : PNRP8to15RegOp<"h", PNRAsmOp16_p8to15, 16, PPR_p8to15>;
def PNR32_p8to15  : PNRP8to15RegOp<"s", PNRAsmOp32_p8to15, 32, PPR_p8to15>;
def PNR64_p8to15  : PNRP8to15RegOp<"d", PNRAsmOp64_p8to15, 64, PPR_p8to15>;


let Namespace = "AArch64" in {
  def psub0 : SubRegIndex<16, -1>;
  def psub1 : SubRegIndex<16, -1>;
}
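// The -1 offsets above mark these as tuple sub-register indices whose bit
// offset within the enclosing register is not a fixed compile-time value.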

// Pairs of SVE predicate vector registers.
def PSeqPairs : RegisterTuples<[psub0, psub1], [(rotl PPR, 0), (rotl PPR, 1)]>;

def PPR2 : RegisterClass<"AArch64", [untyped], 16, (add PSeqPairs)> {
  let Size = 32;
}

class PPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
  let Name = "SVEPredicateList" # NumRegs # "x" # ElementWidth;
  let ParserMethod = "tryParseVectorList<RegKind::SVEPredicateVector>";
  let PredicateMethod = "isTypedVectorList<RegKind::SVEPredicateVector, "
                            # NumRegs #", 0, "#ElementWidth #">";
  let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_PReg, "
                         # NumRegs #">";
}

def PP_b : RegisterOperand<PPR2, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = PPRVectorList<8, 2>;
}

def PP_h : RegisterOperand<PPR2, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = PPRVectorList<16, 2>;
}

def PP_s : RegisterOperand<PPR2, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = PPRVectorList<32, 2>;
}

def PP_d : RegisterOperand<PPR2, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = PPRVectorList<64, 2>;
}

// SVE2 multiple-of-2 multi-predicate-vector operands
def PPR2Mul2 : RegisterClass<"AArch64", [untyped], 16, (add (decimate PSeqPairs, 2))> {
  let Size = 32;
}

class PPRVectorListMul<int ElementWidth, int NumRegs> : PPRVectorList<ElementWidth, NumRegs> {
  let Name = "SVEPredicateListMul" # NumRegs # "x" # ElementWidth;
  let DiagnosticType = "Invalid" # Name;
  let PredicateMethod =
      "isTypedVectorListMultiple<RegKind::SVEPredicateVector, " # NumRegs # ", 0, "
                                                                # ElementWidth # ">";
}

let EncoderMethod = "EncodeRegAsMultipleOf<2>",
    DecoderMethod = "DecodePPR2Mul2RegisterClass" in {
  def PP_b_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = PPRVectorListMul<8, 2>;
  }

  def PP_h_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = PPRVectorListMul<16, 2>;
  }

  def PP_s_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = PPRVectorListMul<32, 2>;
  }

  def PP_d_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = PPRVectorListMul<64, 2>;
  }
}  // end let EncoderMethod/DecoderMethod


//******************************************************************************

// SVE vector register classes
class ZPRClass<int lastreg> : RegisterClass<"AArch64",
                                            [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
                                             nxv2f16, nxv4f16, nxv8f16,
                                             nxv2bf16, nxv4bf16, nxv8bf16,
                                             nxv2f32, nxv4f32,
                                             nxv2f64],
                                            128, (sequence "Z%u", 0, lastreg)> {
  let Size = 128;
}

def ZPR    : ZPRClass<31>;
def ZPR_4b : ZPRClass<15>; // Restricted 4 bit SVE vector register class.
def ZPR_3b : ZPRClass<7>;  // Restricted 3 bit SVE vector register class.

class ZPRAsmOperand<string name, int Width, string RegClassSuffix = "">
    : AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEDataVectorRegOfWidth<"
                            # Width # ", AArch64::ZPR"
                            # RegClassSuffix # "RegClassID>";
  let RenderMethod = "addRegOperands";
  let DiagnosticType = "InvalidZPR" # RegClassSuffix # Width;
  let ParserMethod = "tryParseSVEDataVector<false, "
                               # !if(!eq(Width, 0), "false", "true") # ">";
}

def ZPRAsmOpAny : ZPRAsmOperand<"VectorAny", 0>;
def ZPRAsmOp8   : ZPRAsmOperand<"VectorB",   8>;
def ZPRAsmOp16  : ZPRAsmOperand<"VectorH",   16>;
def ZPRAsmOp32  : ZPRAsmOperand<"VectorS",   32>;
def ZPRAsmOp64  : ZPRAsmOperand<"VectorD",   64>;
def ZPRAsmOp128 : ZPRAsmOperand<"VectorQ",   128>;

def ZPRAny  : ZPRRegOp<"",  ZPRAsmOpAny, ElementSizeNone, ZPR>;
def ZPR8    : ZPRRegOp<"b", ZPRAsmOp8,   ElementSizeB, ZPR>;
def ZPR16   : ZPRRegOp<"h", ZPRAsmOp16,  ElementSizeH, ZPR>;
def ZPR32   : ZPRRegOp<"s", ZPRAsmOp32,  ElementSizeS, ZPR>;
def ZPR64   : ZPRRegOp<"d", ZPRAsmOp64,  ElementSizeD, ZPR>;
def ZPR128  : ZPRRegOp<"q", ZPRAsmOp128, ElementSizeQ, ZPR>;

def ZPRAsmOp3b8   : ZPRAsmOperand<"Vector3bB", 8, "_3b">;
def ZPRAsmOp3b16  : ZPRAsmOperand<"Vector3bH", 16, "_3b">;
def ZPRAsmOp3b32  : ZPRAsmOperand<"Vector3bS", 32, "_3b">;

def ZPR3b8  : ZPRRegOp<"b", ZPRAsmOp3b8,  ElementSizeB, ZPR_3b>;
def ZPR3b16 : ZPRRegOp<"h", ZPRAsmOp3b16, ElementSizeH, ZPR_3b>;
def ZPR3b32 : ZPRRegOp<"s", ZPRAsmOp3b32, ElementSizeS, ZPR_3b>;

def ZPRAsmOp4b8   : ZPRAsmOperand<"Vector4bB", 8,  "_4b">;
def ZPRAsmOp4b16  : ZPRAsmOperand<"Vector4bH", 16, "_4b">;
def ZPRAsmOp4b32  : ZPRAsmOperand<"Vector4bS", 32, "_4b">;
def ZPRAsmOp4b64  : ZPRAsmOperand<"Vector4bD", 64, "_4b">;

def ZPR4b8  : ZPRRegOp<"b", ZPRAsmOp4b8,  ElementSizeB, ZPR_4b>;
def ZPR4b16 : ZPRRegOp<"h", ZPRAsmOp4b16, ElementSizeH, ZPR_4b>;
def ZPR4b32 : ZPRRegOp<"s", ZPRAsmOp4b32, ElementSizeS, ZPR_4b>;
def ZPR4b64 : ZPRRegOp<"d", ZPRAsmOp4b64, ElementSizeD, ZPR_4b>;

class FPRasZPR<int Width> : AsmOperandClass{
  let Name = "FPR" # Width # "asZPR";
  let PredicateMethod = "isFPRasZPR<AArch64::FPR" # Width # "RegClassID>";
  let RenderMethod = "addFPRasZPRRegOperands<" # Width # ">";
}

class FPRasZPROperand<int Width> : RegisterOperand<ZPR> {
  let ParserMatchClass = FPRasZPR<Width>;
  let PrintMethod = "printZPRasFPR<" # Width # ">";
}

def FPR8asZPR   : FPRasZPROperand<8>;
def FPR16asZPR  : FPRasZPROperand<16>;
def FPR32asZPR  : FPRasZPROperand<32>;
def FPR64asZPR  : FPRasZPROperand<64>;
def FPR128asZPR : FPRasZPROperand<128>;

let Namespace = "AArch64" in {
  def zsub0 : SubRegIndex<128, -1>;
  def zsub1 : SubRegIndex<128, -1>;
  def zsub2 : SubRegIndex<128, -1>;
  def zsub3 : SubRegIndex<128, -1>;
}

// Pairs, triples, and quads of SVE vector registers.
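// These back the consecutive-register list forms of the SVE multi-vector
// loads and stores (e.g. ld2d/ld3d/ld4d and their store counterparts).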
1132def ZSeqPairs   : RegisterTuples<[zsub0, zsub1], [(rotl ZPR, 0), (rotl ZPR, 1)]>;
1133def ZSeqTriples : RegisterTuples<[zsub0, zsub1, zsub2], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2)]>;
1134def ZSeqQuads   : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2), (rotl ZPR, 3)]>;
1135
1136def ZPR2   : RegisterClass<"AArch64", [untyped], 128, (add ZSeqPairs)>  {
1137  let Size = 256;
1138}
1139def ZPR3  : RegisterClass<"AArch64", [untyped], 128, (add ZSeqTriples)> {
1140  let Size = 384;
1141}
1142def ZPR4 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqQuads)> {
1143  let Size = 512;
1144}
1145
1146class ZPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
1147  let Name = "SVEVectorList" # NumRegs # ElementWidth;
1148  let ParserMethod = "tryParseVectorList<RegKind::SVEDataVector>";
1149  let PredicateMethod =
1150      "isTypedVectorList<RegKind::SVEDataVector, " #NumRegs #", 0, " #ElementWidth #">";
1151  let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_ZReg, " # NumRegs # ">";
1152}
1153
1154def Z_b  : RegisterOperand<ZPR,  "printTypedVectorList<0,'b'>"> {
1155  let ParserMatchClass = ZPRVectorList<8, 1>;
1156}
1157
1158def Z_h  : RegisterOperand<ZPR,  "printTypedVectorList<0,'h'>"> {
1159  let ParserMatchClass = ZPRVectorList<16, 1>;
1160}
1161
1162def Z_s  : RegisterOperand<ZPR,  "printTypedVectorList<0,'s'>"> {
1163  let ParserMatchClass = ZPRVectorList<32, 1>;
1164}
1165
1166def Z_d  : RegisterOperand<ZPR,  "printTypedVectorList<0,'d'>"> {
1167  let ParserMatchClass = ZPRVectorList<64, 1>;
1168}
1169
1170def Z_q  : RegisterOperand<ZPR,  "printTypedVectorList<0,'q'>"> {
1171  let ParserMatchClass = ZPRVectorList<128, 1>;
1172}

def ZZ_b  : RegisterOperand<ZPR2, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 2>;
}

def ZZ_h  : RegisterOperand<ZPR2, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 2>;
}

def ZZ_s  : RegisterOperand<ZPR2, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 2>;
}

def ZZ_d  : RegisterOperand<ZPR2, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 2>;
}

def ZZ_q  : RegisterOperand<ZPR2, "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 2>;
}

def ZZZ_b  : RegisterOperand<ZPR3, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 3>;
}

def ZZZ_h  : RegisterOperand<ZPR3, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 3>;
}

def ZZZ_s  : RegisterOperand<ZPR3, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 3>;
}

def ZZZ_d  : RegisterOperand<ZPR3, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 3>;
}

def ZZZ_q  : RegisterOperand<ZPR3, "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 3>;
}

def ZZZZ_b : RegisterOperand<ZPR4, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 4>;
}

def ZZZZ_h : RegisterOperand<ZPR4, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 4>;
}

def ZZZZ_s : RegisterOperand<ZPR4, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 4>;
}

def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 4>;
}

def ZZZZ_q : RegisterOperand<ZPR4, "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 4>;
}

// SME2 multiple-of-2 or 4 multi-vector operands
def ZPR2Mul2 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqPairs, 2))> {
  let Size = 256;
}

def ZPR4Mul4 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqQuads, 4))> {
  let Size = 512;
}
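// Decimating the rotated sequences keeps only the tuples whose first register
// number is a multiple of the group size: ZPR2Mul2 contains Z0_Z1, Z2_Z3, ...,
// Z30_Z31, and ZPR4Mul4 contains Z0_Z1_Z2_Z3, Z4_Z5_Z6_Z7, ..., Z28_Z29_Z30_Z31.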

class ZPRVectorListMul<int ElementWidth, int NumRegs> : ZPRVectorList<ElementWidth, NumRegs> {
  let Name = "SVEVectorListMul" # NumRegs # "x" # ElementWidth;
  let DiagnosticType = "Invalid" # Name;
  let PredicateMethod =
      "isTypedVectorListMultiple<RegKind::SVEDataVector, " # NumRegs # ", 0, "
                                                           # ElementWidth # ">";
}

let EncoderMethod = "EncodeRegAsMultipleOf<2>",
    DecoderMethod = "DecodeZPR2Mul2RegisterClass" in {
  def ZZ_b_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = ZPRVectorListMul<8, 2>;
  }

  def ZZ_h_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = ZPRVectorListMul<16, 2>;
  }

  def ZZ_s_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = ZPRVectorListMul<32, 2>;
  }

  def ZZ_d_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = ZPRVectorListMul<64, 2>;
  }

  def ZZ_q_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'q'>"> {
    let ParserMatchClass = ZPRVectorListMul<128, 2>;
  }
} // end let EncoderMethod/DecoderMethod

let EncoderMethod = "EncodeRegAsMultipleOf<4>",
    DecoderMethod = "DecodeZPR4Mul4RegisterClass" in {
  def ZZZZ_b_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = ZPRVectorListMul<8, 4>;
  }

  def ZZZZ_h_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = ZPRVectorListMul<16, 4>;
  }

  def ZZZZ_s_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = ZPRVectorListMul<32, 4>;
  }

  def ZZZZ_d_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = ZPRVectorListMul<64, 4>;
  }

  def ZZZZ_q_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'q'>"> {
    let ParserMatchClass = ZPRVectorListMul<128, 4>;
  }
} // end let EncoderMethod/DecoderMethod
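// For these operands the assembler only accepts consecutive lists whose first
// register is suitably aligned, e.g. "{ z0.h, z1.h }" is a valid ZZ_h_mul_r
// operand whereas "{ z1.h, z2.h }" is not.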

// SME2 strided multi-vector operands

// ZStridedPairs
//
// A group of two Z vectors with strided numbering consisting of:
//   Zn+0.T and Zn+8.T
// where n is in the range 0 to 7 and 16 to 23 inclusive, and T is one of B, H,
// S, or D.

// Z0_Z8, Z1_Z9, Z2_Z10, Z3_Z11, Z4_Z12, Z5_Z13, Z6_Z14, Z7_Z15
def ZStridedPairsLo : RegisterTuples<[zsub0, zsub1], [
  (trunc (rotl ZPR, 0), 8), (trunc (rotl ZPR, 8), 8)
]>;

// Z16_Z24, Z17_Z25, Z18_Z26, Z19_Z27, Z20_Z28, Z21_Z29, Z22_Z30, Z23_Z31
def ZStridedPairsHi : RegisterTuples<[zsub0, zsub1], [
  (trunc (rotl ZPR, 16), 8), (trunc (rotl ZPR, 24), 8)
]>;

// ZStridedQuads
//
// A group of four Z vectors with strided numbering consisting of:
//   Zn+0.T, Zn+4.T, Zn+8.T and Zn+12.T
// where n is in the range 0 to 3 and 16 to 19 inclusive, and T is one of B, H,
// S, or D.

// Z0_Z4_Z8_Z12, Z1_Z5_Z9_Z13, Z2_Z6_Z10_Z14, Z3_Z7_Z11_Z15
def ZStridedQuadsLo : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [
  (trunc (rotl ZPR, 0), 4), (trunc (rotl ZPR, 4), 4),
  (trunc (rotl ZPR, 8), 4), (trunc (rotl ZPR, 12), 4)
]>;
// Z16_Z20_Z24_Z28, Z17_Z21_Z25_Z29, Z18_Z22_Z26_Z30, Z19_Z23_Z27_Z31
def ZStridedQuadsHi : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [
  (trunc (rotl ZPR, 16), 4), (trunc (rotl ZPR, 20), 4),
  (trunc (rotl ZPR, 24), 4), (trunc (rotl ZPR, 28), 4)
]>;

def ZPR2Strided : RegisterClass<"AArch64", [untyped], 256,
                                (add ZStridedPairsLo, ZStridedPairsHi)>  {
  let Size = 256;
}
def ZPR4Strided : RegisterClass<"AArch64", [untyped], 512,
                                (add ZStridedQuadsLo, ZStridedQuadsHi)>  {
  let Size = 512;
}
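// Because the registers in a strided group are not consecutive, the assembly
// syntax lists them individually, e.g. "{ z0.s, z8.s }" for a pair or
// "{ z0.s, z4.s, z8.s, z12.s }" for a quad.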


class ZPRVectorListStrided<int ElementWidth, int NumRegs, int Stride>
    : ZPRVectorList<ElementWidth, NumRegs> {
  let Name = "SVEVectorListStrided" # NumRegs # "x" # ElementWidth;
  let DiagnosticType = "Invalid" # Name;
  let PredicateMethod = "isTypedVectorListStrided<RegKind::SVEDataVector, "
                        # NumRegs # "," # Stride # "," # ElementWidth # ">";
  let RenderMethod = "addStridedVectorListOperands<" # NumRegs # ">";
}

let EncoderMethod = "EncodeZPR2StridedRegisterClass",
    DecoderMethod = "DecodeZPR2StridedRegisterClass" in {
  def ZZ_b_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0, 'b'>"> {
    let ParserMatchClass = ZPRVectorListStrided<8, 2, 8>;
  }

  def ZZ_h_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0, 'h'>"> {
    let ParserMatchClass = ZPRVectorListStrided<16, 2, 8>;
  }

  def ZZ_s_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = ZPRVectorListStrided<32, 2, 8>;
  }

  def ZZ_d_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = ZPRVectorListStrided<64, 2, 8>;
  }
}

let EncoderMethod = "EncodeZPR4StridedRegisterClass",
    DecoderMethod = "DecodeZPR4StridedRegisterClass" in {
  def ZZZZ_b_strided
      : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = ZPRVectorListStrided<8, 4, 4>;
  }

  def ZZZZ_h_strided
      : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = ZPRVectorListStrided<16, 4, 4>;
  }

  def ZZZZ_s_strided
      : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = ZPRVectorListStrided<32, 4, 4>;
  }

  def ZZZZ_d_strided
      : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = ZPRVectorListStrided<64, 4, 4>;
  }
}

class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale,
                          bit ScaleAlwaysSame = 0b0> : AsmOperandClass {
  let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale
                         # !if(ScaleAlwaysSame, "Only", "");

  let PredicateMethod = "isSVEDataVectorRegWithShiftExtend<"
                          # RegWidth # ", AArch64::ZPRRegClassID, "
                          # "AArch64_AM::" # ShiftExtend # ", "
                          # Scale # ", "
                          # !if(ScaleAlwaysSame, "true", "false")
                          # ">";
  let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale;
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEDataVector<true, true>";
}

class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr,
                               int RegWidth, int Scale, string Suffix = "">
    : RegisterOperand<ZPR> {
  let ParserMatchClass =
    !cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale # Suffix);
  let PrintMethod = "printRegWithShiftExtend<"
                          # !if(SignExtend, "true", "false") # ", "
                          # Scale # ", "
                          # !if(IsLSL, "'x'", "'w'") # ", "
                          # !if(!eq(RegWidth, 32), "'s'", "'d'") # ">";
}
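// These classes model the vector-plus-shift/extend operands of SVE
// gather/scatter addressing modes, where Scale is the size in bits of the
// memory elements being addressed. For example, the address operand
// "z1.d, lsl #3" in "[x0, z1.d, lsl #3]" corresponds to ZPR64ExtLSL64.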

foreach RegWidth = [32, 64] in {
  // UXTW(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtUXTW8Only : ZPRExtendAsmOperand<"UXTW", RegWidth, 8, 0b1>;
  def ZPR#RegWidth#AsmOpndExtUXTW8     : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtUXTW16    : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtUXTW32    : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtUXTW64    : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>;

  def ZPR#RegWidth#ExtUXTW8Only        : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8, "Only">;
  def ZPR#RegWidth#ExtUXTW8            : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>;
  def ZPR#RegWidth#ExtUXTW16           : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>;
  def ZPR#RegWidth#ExtUXTW32           : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>;
  def ZPR#RegWidth#ExtUXTW64           : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>;

  // SXTW(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtSXTW8Only : ZPRExtendAsmOperand<"SXTW", RegWidth, 8, 0b1>;
  def ZPR#RegWidth#AsmOpndExtSXTW8     : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtSXTW16    : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtSXTW32    : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtSXTW64    : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>;

  def ZPR#RegWidth#ExtSXTW8Only        : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8, "Only">;
  def ZPR#RegWidth#ExtSXTW8            : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>;
  def ZPR#RegWidth#ExtSXTW16           : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>;
  def ZPR#RegWidth#ExtSXTW32           : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>;
  def ZPR#RegWidth#ExtSXTW64           : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 64>;

  // LSL(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtLSL8      : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtLSL16     : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtLSL32     : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtLSL64     : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
  def ZPR#RegWidth#ExtLSL8             : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
  def ZPR#RegWidth#ExtLSL16            : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
  def ZPR#RegWidth#ExtLSL32            : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
  def ZPR#RegWidth#ExtLSL64            : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;
}

class GPR64ShiftExtendAsmOperand <string AsmOperandName, int Scale, string RegClass> : AsmOperandClass {
  let Name = AsmOperandName # Scale;
  let PredicateMethod = "isGPR64WithShiftExtend<AArch64::"#RegClass#"RegClassID, " # Scale # ">";
  let DiagnosticType = "Invalid" # AsmOperandName # Scale;
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseGPROperand<true>";
}

class GPR64ExtendRegisterOperand<string Name, int Scale, RegisterClass RegClass> : RegisterOperand<RegClass>{
  let ParserMatchClass = !cast<AsmOperandClass>(Name);
  let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x', 0>";
}

foreach Scale = [8, 16, 32, 64, 128] in {
  def GPR64shiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64shifted", Scale, "GPR64">;
  def GPR64shifted # Scale : GPR64ExtendRegisterOperand<"GPR64shiftedAsmOpnd" # Scale, Scale, GPR64>;

  def GPR64NoXZRshiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64NoXZRshifted", Scale, "GPR64common">;
  def GPR64NoXZRshifted # Scale : GPR64ExtendRegisterOperand<"GPR64NoXZRshiftedAsmOpnd" # Scale, Scale, GPR64common>;
}
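// The GPR64shifted<Scale> operands describe a 64-bit index register with an
// optional "lsl #log2(Scale/8)" shift, e.g. "x1, lsl #2" for 32-bit elements;
// the NoXZR variants additionally exclude XZR from the allowed registers.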

// Accumulator array tiles.
def ZAQ0  : AArch64Reg<0,  "za0.q">;
def ZAQ1  : AArch64Reg<1,  "za1.q">;
def ZAQ2  : AArch64Reg<2,  "za2.q">;
def ZAQ3  : AArch64Reg<3,  "za3.q">;
def ZAQ4  : AArch64Reg<4,  "za4.q">;
def ZAQ5  : AArch64Reg<5,  "za5.q">;
def ZAQ6  : AArch64Reg<6,  "za6.q">;
def ZAQ7  : AArch64Reg<7,  "za7.q">;
def ZAQ8  : AArch64Reg<8,  "za8.q">;
def ZAQ9  : AArch64Reg<9,  "za9.q">;
def ZAQ10 : AArch64Reg<10, "za10.q">;
def ZAQ11 : AArch64Reg<11, "za11.q">;
def ZAQ12 : AArch64Reg<12, "za12.q">;
def ZAQ13 : AArch64Reg<13, "za13.q">;
def ZAQ14 : AArch64Reg<14, "za14.q">;
def ZAQ15 : AArch64Reg<15, "za15.q">;

let SubRegIndices = [zasubq0, zasubq1] in {
  def ZAD0 : AArch64Reg<0, "za0.d", [ZAQ0, ZAQ8]>;
  def ZAD1 : AArch64Reg<1, "za1.d", [ZAQ1, ZAQ9]>;
  def ZAD2 : AArch64Reg<2, "za2.d", [ZAQ2, ZAQ10]>;
  def ZAD3 : AArch64Reg<3, "za3.d", [ZAQ3, ZAQ11]>;
  def ZAD4 : AArch64Reg<4, "za4.d", [ZAQ4, ZAQ12]>;
  def ZAD5 : AArch64Reg<5, "za5.d", [ZAQ5, ZAQ13]>;
  def ZAD6 : AArch64Reg<6, "za6.d", [ZAQ6, ZAQ14]>;
  def ZAD7 : AArch64Reg<7, "za7.d", [ZAQ7, ZAQ15]>;
}

let SubRegIndices = [zasubd0, zasubd1] in {
  def ZAS0 : AArch64Reg<0, "za0.s", [ZAD0, ZAD4]>;
  def ZAS1 : AArch64Reg<1, "za1.s", [ZAD1, ZAD5]>;
  def ZAS2 : AArch64Reg<2, "za2.s", [ZAD2, ZAD6]>;
  def ZAS3 : AArch64Reg<3, "za3.s", [ZAD3, ZAD7]>;
}

let SubRegIndices = [zasubs0, zasubs1] in {
  def ZAH0 : AArch64Reg<0, "za0.h", [ZAS0, ZAS2]>;
  def ZAH1 : AArch64Reg<1, "za1.h", [ZAS1, ZAS3]>;
}

let SubRegIndices = [zasubh0, zasubh1] in {
  def ZAB0 : AArch64Reg<0, "za0.b", [ZAH0, ZAH1]>;
}

let SubRegIndices = [zasubb] in {
  def ZA : AArch64Reg<0, "za", [ZAB0]>;
}
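// The tiles form an aliasing hierarchy: for example za0.s is composed of
// za0.d and za4.d, za0.h of za0.s and za2.s, and za0.b covers the entire
// accumulator array ZA.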

def ZT0 : AArch64Reg<0, "zt0">;

// SME Register Classes

let isAllocatable = 0 in {
  // Accumulator array
  def MPR : RegisterClass<"AArch64", [untyped], 2048, (add ZA)> {
    let Size = 2048;
  }

  // Accumulator array as single tiles
  def MPR8    : RegisterClass<"AArch64", [untyped], 2048, (add (sequence "ZAB%u", 0, 0))> {
    let Size = 2048;
  }
  def MPR16   : RegisterClass<"AArch64", [untyped], 1024, (add (sequence "ZAH%u", 0, 1))> {
    let Size = 1024;
  }
  def MPR32   : RegisterClass<"AArch64", [untyped],  512, (add (sequence "ZAS%u", 0, 3))> {
    let Size = 512;
  }
  def MPR64   : RegisterClass<"AArch64", [untyped],  256, (add (sequence "ZAD%u", 0, 7))> {
    let Size = 256;
  }
  def MPR128  : RegisterClass<"AArch64", [untyped],  128, (add (sequence "ZAQ%u", 0, 15))> {
    let Size = 128;
  }
}

def ZTR : RegisterClass<"AArch64", [untyped], 512, (add ZT0)> {
  let Size = 512;
  let DiagnosticType = "InvalidLookupTable";
}
// SME Register Operands
// There are three types of SME matrix register operands:
// * Tiles:
//
//   These tiles make up the larger accumulator matrix. The tile representation
//   has an element type suffix, e.g. za0.b or za15.q and can be any of the
//   registers:
//          ZAQ0..ZAQ15
//          ZAD0..ZAD7
//          ZAS0..ZAS3
//          ZAH0..ZAH1
//       or ZAB0
//
// * Tile vectors:
//
//   Their representation is similar to regular tiles, but they have an extra
//   'h' or 'v' to tell how the vector at [reg+offset] is laid out in the tile,
//   horizontally or vertically.
//
//   e.g. za1h.h or za15v.q, which correspond to vectors in registers ZAH1 and
//   ZAQ15, respectively. The horizontal/vertical layout is more a property of
//   the instruction than of the asm-operand itself or its register.
//   The distinction is required for the parsing/printing of the operand,
//   as from a compiler's perspective, the whole tile is read/written.
//
// * Accumulator matrix:
//
//   This is the entire matrix accumulator register ZA (<=> ZAB0), printed as
//   'za'.
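//
//   For example, an operand written as "za1v.s[w12, 0]" selects a vertical
//   slice of tile ZAS1, indexed by w12 plus an immediate offset.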

//
// Tiles
//

class MatrixTileAsmOperand<string RC, int EltSize> : AsmOperandClass {
  let Name = "MatrixTile" # EltSize;
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::Tile" # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixTileOperand<int EltSize, int NumBitsForTile, RegisterClass RC>
    : RegisterOperand<RC> {
  let ParserMatchClass = MatrixTileAsmOperand<!cast<string>(RC), EltSize>;
  let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
  let PrintMethod = "printMatrixTile";
}

def TileOp16  : MatrixTileOperand<16, 1, MPR16>;
def TileOp32  : MatrixTileOperand<32, 2, MPR32>;
def TileOp64  : MatrixTileOperand<64, 3, MPR64>;

//
// Tile vectors (horizontal and vertical)
//

class MatrixTileVectorAsmOperand<string RC, int EltSize, int IsVertical>
    : AsmOperandClass {
  let Name = "MatrixTileVector" # !if(IsVertical, "V", "H") # EltSize;
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::"
                          # !if(IsVertical, "Col", "Row") # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixTileVectorOperand<int EltSize, int NumBitsForTile,
                              RegisterClass RC, int IsVertical>
    : RegisterOperand<RC> {
  let ParserMatchClass = MatrixTileVectorAsmOperand<!cast<string>(RC), EltSize,
                                                    IsVertical>;
  let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
  let PrintMethod = "printMatrixTileVector<" # IsVertical # ">";
}

def TileVectorOpH8   : MatrixTileVectorOperand<  8, 0, MPR8,   0>;
def TileVectorOpH16  : MatrixTileVectorOperand< 16, 1, MPR16,  0>;
def TileVectorOpH32  : MatrixTileVectorOperand< 32, 2, MPR32,  0>;
def TileVectorOpH64  : MatrixTileVectorOperand< 64, 3, MPR64,  0>;
def TileVectorOpH128 : MatrixTileVectorOperand<128, 4, MPR128, 0>;

def TileVectorOpV8   : MatrixTileVectorOperand<  8, 0, MPR8,   1>;
def TileVectorOpV16  : MatrixTileVectorOperand< 16, 1, MPR16,  1>;
def TileVectorOpV32  : MatrixTileVectorOperand< 32, 2, MPR32,  1>;
def TileVectorOpV64  : MatrixTileVectorOperand< 64, 3, MPR64,  1>;
def TileVectorOpV128 : MatrixTileVectorOperand<128, 4, MPR128, 1>;

//
// Accumulator matrix
//

class MatrixAsmOperand<string RC, int EltSize> : AsmOperandClass {
  let Name = "Matrix" # !if(EltSize, !cast<string>(EltSize), "");
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::Array" # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixOperand<RegisterClass RC, int EltSize> : RegisterOperand<RC> {
  let ParserMatchClass = MatrixAsmOperand<!cast<string>(RC), EltSize>;
  let PrintMethod = "printMatrix<" # EltSize # ">";
}

def MatrixOp : MatrixOperand<MPR, 0>;
// SME2 register operands and classes
def MatrixOp8 : MatrixOperand<MPR, 8>;
def MatrixOp16 : MatrixOperand<MPR, 16>;
def MatrixOp32 : MatrixOperand<MPR, 32>;
def MatrixOp64 : MatrixOperand<MPR, 64>;

class MatrixTileListAsmOperand : AsmOperandClass {
  let Name = "MatrixTileList";
  let ParserMethod = "tryParseMatrixTileList";
  let RenderMethod = "addMatrixTileListOperands";
  let PredicateMethod = "isMatrixTileList";
}

class MatrixTileListOperand : Operand<i8> {
  let ParserMatchClass = MatrixTileListAsmOperand<>;
  let DecoderMethod = "DecodeMatrixTileListRegisterClass";
  let EncoderMethod = "EncodeMatrixTileListRegisterClass";
  let PrintMethod = "printMatrixTileList";
}

def MatrixTileList : MatrixTileListOperand<>;
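// The tile list is carried as an i8 mask over the eight 64-bit tiles
// ZAD0..ZAD7, so an instruction such as ZERO can accept an operand like
// "{ za0.d, za2.d, za5.d }".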

def MatrixIndexGPR32_8_11 : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 8, 11)> {
  let DiagnosticType = "InvalidMatrixIndexGPR32_8_11";
}
def MatrixIndexGPR32_12_15 : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 12, 15)> {
  let DiagnosticType = "InvalidMatrixIndexGPR32_12_15";
}
def MatrixIndexGPR32Op8_11 : RegisterOperand<MatrixIndexGPR32_8_11> {
  let EncoderMethod = "encodeMatrixIndexGPR32<AArch64::W8>";
}
def MatrixIndexGPR32Op12_15 : RegisterOperand<MatrixIndexGPR32_12_15> {
  let EncoderMethod = "encodeMatrixIndexGPR32<AArch64::W12>";
}
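// These cover the GPRs usable as slice-index registers ("Ws" in the SME
// syntax): w12-w15, plus w8-w11 for some SME2 forms. The encoder subtracts
// the base register (W8 or W12) to produce the 2-bit index field.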

def SVCROperand : AsmOperandClass {
  let Name = "SVCR";
  let ParserMethod = "tryParseSVCR";
  let DiagnosticType = "Invalid" # Name;
}

def svcr_op : Operand<i32>, TImmLeaf<i32, [{
    return AArch64SVCR::lookupSVCRByEncoding(Imm) != nullptr;
  }]> {
  let ParserMatchClass = SVCROperand;
  let PrintMethod = "printSVCROp";
  let DecoderMethod = "DecodeSVCROp";
  let MCOperandPredicate = [{
    if (!MCOp.isImm())
      return false;
    return AArch64SVCR::lookupSVCRByEncoding(MCOp.getImm()) != nullptr;
  }];
}
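// svcr_op matches the streaming-mode/ZA state names accepted by SMSTART and
// SMSTOP (e.g. "smstart sm" or "smstart za"), validated against the generated
// SVCR lookup table.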

//===----------------------------------------------------------------------===//
// Register categories.
//

def GeneralPurposeRegisters : RegisterCategory<[GPR64, GPR32]>;

def FIXED_REGS : RegisterClass<"AArch64", [i64], 64, (add FP, SP, VG, FFR)>;
def FixedRegisters : RegisterCategory<[CCR, FIXED_REGS]>;
