//=- AArch64RegisterInfo.td - Describe the AArch64 Registers -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the AArch64 register file and the register operands
// built on top of it.
//
//===----------------------------------------------------------------------===//


class AArch64Reg<bits<16> enc, string n, list<Register> subregs = [],
               list<string> altNames = []>
        : Register<n, altNames> {
  let HWEncoding = enc;
  let Namespace = "AArch64";
  let SubRegs = subregs;
}

let Namespace = "AArch64" in {
  def sub_32 : SubRegIndex<32>;

  def bsub : SubRegIndex<8>;
  def hsub : SubRegIndex<16>;
  def ssub : SubRegIndex<32>;
  def dsub : SubRegIndex<64>;
  def sube32 : SubRegIndex<32>;
  def subo32 : SubRegIndex<32>;
  def sube64 : SubRegIndex<64>;
  def subo64 : SubRegIndex<64>;
  // SVE
  def zsub    : SubRegIndex<128>;
  // Note: Code depends on these having consecutive numbers
  def dsub0 : SubRegIndex<64>;
  def dsub1 : SubRegIndex<64>;
  def dsub2 : SubRegIndex<64>;
  def dsub3 : SubRegIndex<64>;
  // Note: Code depends on these having consecutive numbers
  def qsub0 : SubRegIndex<128>;
  def qsub1 : SubRegIndex<128>;
  def qsub2 : SubRegIndex<128>;
  def qsub3 : SubRegIndex<128>;
  // Note: Code depends on these having consecutive numbers
  def zasubb  : SubRegIndex<2048>; // (16 x 16)/1 bytes  = 2048 bits
  def zasubh0 : SubRegIndex<1024>; // (16 x 16)/2 bytes  = 1024 bits
  def zasubh1 : SubRegIndex<1024>; // (16 x 16)/2 bytes  = 1024 bits
  def zasubs0 : SubRegIndex<512>;  // (16 x 16)/4 bytes  = 512 bits
  def zasubs1 : SubRegIndex<512>;  // (16 x 16)/4 bytes  = 512 bits
  def zasubd0 : SubRegIndex<256>;  // (16 x 16)/8 bytes  = 256 bits
  def zasubd1 : SubRegIndex<256>;  // (16 x 16)/8 bytes  = 256 bits
  def zasubq0 : SubRegIndex<128>;  // (16 x 16)/16 bytes = 128 bits
  def zasubq1 : SubRegIndex<128>;  // (16 x 16)/16 bytes = 128 bits

  def psub : SubRegIndex<16>;
}

let Namespace = "AArch64" in {
  def vreg : RegAltNameIndex;
  def vlist1 : RegAltNameIndex;
}

//===----------------------------------------------------------------------===//
// Registers
//===----------------------------------------------------------------------===//
def W0    : AArch64Reg<0,   "w0" >, DwarfRegNum<[0]>;
def W1    : AArch64Reg<1,   "w1" >, DwarfRegNum<[1]>;
def W2    : AArch64Reg<2,   "w2" >, DwarfRegNum<[2]>;
def W3    : AArch64Reg<3,   "w3" >, DwarfRegNum<[3]>;
def W4    : AArch64Reg<4,   "w4" >, DwarfRegNum<[4]>;
def W5    : AArch64Reg<5,   "w5" >, DwarfRegNum<[5]>;
def W6    : AArch64Reg<6,   "w6" >, DwarfRegNum<[6]>;
def W7    : AArch64Reg<7,   "w7" >, DwarfRegNum<[7]>;
def W8    : AArch64Reg<8,   "w8" >, DwarfRegNum<[8]>;
def W9    : AArch64Reg<9,   "w9" >, DwarfRegNum<[9]>;
def W10   : AArch64Reg<10, "w10">, DwarfRegNum<[10]>;
def W11   : AArch64Reg<11, "w11">, DwarfRegNum<[11]>;
def W12   : AArch64Reg<12, "w12">, DwarfRegNum<[12]>;
def W13   : AArch64Reg<13, "w13">, DwarfRegNum<[13]>;
def W14   : AArch64Reg<14, "w14">, DwarfRegNum<[14]>;
def W15   : AArch64Reg<15, "w15">, DwarfRegNum<[15]>;
def W16   : AArch64Reg<16, "w16">, DwarfRegNum<[16]>;
def W17   : AArch64Reg<17, "w17">, DwarfRegNum<[17]>;
def W18   : AArch64Reg<18, "w18">, DwarfRegNum<[18]>;
def W19   : AArch64Reg<19, "w19">, DwarfRegNum<[19]>;
def W20   : AArch64Reg<20, "w20">, DwarfRegNum<[20]>;
def W21   : AArch64Reg<21, "w21">, DwarfRegNum<[21]>;
def W22   : AArch64Reg<22, "w22">, DwarfRegNum<[22]>;
def W23   : AArch64Reg<23, "w23">, DwarfRegNum<[23]>;
def W24   : AArch64Reg<24, "w24">, DwarfRegNum<[24]>;
def W25   : AArch64Reg<25, "w25">, DwarfRegNum<[25]>;
def W26   : AArch64Reg<26, "w26">, DwarfRegNum<[26]>;
def W27   : AArch64Reg<27, "w27">, DwarfRegNum<[27]>;
def W28   : AArch64Reg<28, "w28">, DwarfRegNum<[28]>;
def W29   : AArch64Reg<29, "w29">, DwarfRegNum<[29]>;
def W30   : AArch64Reg<30, "w30">, DwarfRegNum<[30]>;
def WSP   : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
let isConstant = true in
def WZR   : AArch64Reg<31, "wzr">, DwarfRegAlias<WSP>;

let SubRegIndices = [sub_32] in {
def X0    : AArch64Reg<0,   "x0",  [W0]>, DwarfRegAlias<W0>;
def X1    : AArch64Reg<1,   "x1",  [W1]>, DwarfRegAlias<W1>;
def X2    : AArch64Reg<2,   "x2",  [W2]>, DwarfRegAlias<W2>;
def X3    : AArch64Reg<3,   "x3",  [W3]>, DwarfRegAlias<W3>;
def X4    : AArch64Reg<4,   "x4",  [W4]>, DwarfRegAlias<W4>;
def X5    : AArch64Reg<5,   "x5",  [W5]>, DwarfRegAlias<W5>;
def X6    : AArch64Reg<6,   "x6",  [W6]>, DwarfRegAlias<W6>;
def X7    : AArch64Reg<7,   "x7",  [W7]>, DwarfRegAlias<W7>;
def X8    : AArch64Reg<8,   "x8",  [W8]>, DwarfRegAlias<W8>;
def X9    : AArch64Reg<9,   "x9",  [W9]>, DwarfRegAlias<W9>;
def X10   : AArch64Reg<10, "x10", [W10]>, DwarfRegAlias<W10>;
def X11   : AArch64Reg<11, "x11", [W11]>, DwarfRegAlias<W11>;
def X12   : AArch64Reg<12, "x12", [W12]>, DwarfRegAlias<W12>;
def X13   : AArch64Reg<13, "x13", [W13]>, DwarfRegAlias<W13>;
def X14   : AArch64Reg<14, "x14", [W14]>, DwarfRegAlias<W14>;
def X15   : AArch64Reg<15, "x15", [W15]>, DwarfRegAlias<W15>;
def X16   : AArch64Reg<16, "x16", [W16]>, DwarfRegAlias<W16>;
def X17   : AArch64Reg<17, "x17", [W17]>, DwarfRegAlias<W17>;
def X18   : AArch64Reg<18, "x18", [W18]>, DwarfRegAlias<W18>;
def X19   : AArch64Reg<19, "x19", [W19]>, DwarfRegAlias<W19>;
def X20   : AArch64Reg<20, "x20", [W20]>, DwarfRegAlias<W20>;
def X21   : AArch64Reg<21, "x21", [W21]>, DwarfRegAlias<W21>;
def X22   : AArch64Reg<22, "x22", [W22]>, DwarfRegAlias<W22>;
def X23   : AArch64Reg<23, "x23", [W23]>, DwarfRegAlias<W23>;
def X24   : AArch64Reg<24, "x24", [W24]>, DwarfRegAlias<W24>;
def X25   : AArch64Reg<25, "x25", [W25]>, DwarfRegAlias<W25>;
def X26   : AArch64Reg<26, "x26", [W26]>, DwarfRegAlias<W26>;
def X27   : AArch64Reg<27, "x27", [W27]>, DwarfRegAlias<W27>;
def X28   : AArch64Reg<28, "x28", [W28]>, DwarfRegAlias<W28>;
def FP    : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias<W29>;
def LR    : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias<W30>;
def SP    : AArch64Reg<31, "sp",  [WSP]>, DwarfRegAlias<WSP>;
let isConstant = true in
def XZR   : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias<WSP>;
}

// Condition code register.
def NZCV  : AArch64Reg<0, "nzcv">;

// First Fault Register (FFR), used by SVE first-faulting and non-faulting loads.
def FFR : AArch64Reg<0, "ffr">, DwarfRegNum<[47]>;

// Purely virtual Vector Granule (VG) Dwarf register
def VG : AArch64Reg<0, "vg">, DwarfRegNum<[46]>;

// Floating-point control register
def FPCR : AArch64Reg<0, "fpcr">;

// GPR register classes containing the intersections of GPR32/GPR32sp and
// GPR64/GPR64sp, for use by the coalescer.
def GPR32common : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 30)> {
  let AltOrders = [(rotl GPR32common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64common : RegisterClass<"AArch64", [i64], 64,
                                (add (sequence "X%u", 0, 28), FP, LR)> {
  let AltOrders = [(rotl GPR64common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
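// Note: the AltOrders above (and on the GPR classes below) rotate the
// allocation order left by 8, i.e. to w8, ..., w30, w0, ..., w7 (and likewise
// for the X forms), and AltOrderSelect always returns 1, so that rotated
// order is the one the allocator actually uses.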
// GPR register classes which exclude SP/WSP.
def GPR32 : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR)> {
  let AltOrders = [(rotl GPR32, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64 : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR)> {
  let AltOrders = [(rotl GPR64, 8)];
  let AltOrderSelect = [{ return 1; }];
}

// GPR register classes which include SP/WSP.
def GPR32sp : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WSP)> {
  let AltOrders = [(rotl GPR32sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64sp : RegisterClass<"AArch64", [i64], 64, (add GPR64common, SP)> {
  let AltOrders = [(rotl GPR64sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}

def GPR32sponly : RegisterClass<"AArch64", [i32], 32, (add WSP)>;
def GPR64sponly : RegisterClass<"AArch64", [i64], 64, (add SP)>;

def GPR64spPlus0Operand : AsmOperandClass {
  let Name = "GPR64sp0";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR64<AArch64::GPR64spRegClassID>";
  let ParserMethod = "tryParseGPR64sp0Operand";
}

def GPR64sp0 : RegisterOperand<GPR64sp> {
  let ParserMatchClass = GPR64spPlus0Operand;
}

// GPR32/GPR64 but with zero-register substitution enabled.
// TODO: Roll this out to GPR32/GPR64/GPR32all/GPR64all.
def GPR32z : RegisterOperand<GPR32> {
  let GIZeroRegister = WZR;
}
def GPR64z : RegisterOperand<GPR64> {
  let GIZeroRegister = XZR;
}
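// Illustrative effect (a sketch of the usual GlobalISel zero-register
// substitution): an operand known to be zero can be selected directly as the
// zero register, e.g. a store of zero becomes
//   str wzr, [x0]
// instead of first materializing 0 into an allocatable GPR.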

// GPR argument registers.
def GPR32arg : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 7)>;
def GPR64arg : RegisterClass<"AArch64", [i64], 64, (sequence "X%u", 0, 7)>;

// GPR register classes which include both WZR/XZR and SP/WSP. This is not a
// constraint used by any instructions; it is only used as a common super-class.
def GPR32all : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR, WSP)>;
def GPR64all : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR, SP)>;

// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// This class is used for indirect tail calls to hold the address of the
// destination.
def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X21,
                                                     X22, X23, X24, X25, X26,
                                                     X27, X28, FP, LR)>;
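// For example, an indirect tail call keeps its target in one of these
// registers and is emitted as a plain indirect branch such as "br x2".
// (With branch target enforcement only x16/x17 may be used; see rtcGPR64
// below.)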

// Restricted set of tail call registers, for use when branch target
// enforcement is enabled. These are the only registers which can be used to
// indirectly branch (not call) to the "BTI c" instruction at the start of a
// BTI-protected function.
def rtcGPR64 : RegisterClass<"AArch64", [i64], 64, (add X16, X17)>;

// Register class that excludes the registers reserved for procedure calls.
// This is used for pseudo-instructions that are actually implemented using a
// procedure call.
def GPR64noip : RegisterClass<"AArch64", [i64], 64, (sub GPR64, X16, X17, LR)>;

// GPR register operands for the post-increment amount of vector loads/stores.
// They have alternate printing: when Rm=31 the operand is printed as a
// constant immediate equal to the total number of bytes transferred.

// FIXME: TableGen *should* be able to do these itself now. There appears to be
// a bug in counting how many operands a post-indexed MCInst should have, which
// means the aliases don't trigger.
def GPR64pi1  : RegisterOperand<GPR64, "printPostIncOperand<1>">;
def GPR64pi2  : RegisterOperand<GPR64, "printPostIncOperand<2>">;
def GPR64pi3  : RegisterOperand<GPR64, "printPostIncOperand<3>">;
def GPR64pi4  : RegisterOperand<GPR64, "printPostIncOperand<4>">;
def GPR64pi6  : RegisterOperand<GPR64, "printPostIncOperand<6>">;
def GPR64pi8  : RegisterOperand<GPR64, "printPostIncOperand<8>">;
def GPR64pi12 : RegisterOperand<GPR64, "printPostIncOperand<12>">;
def GPR64pi16 : RegisterOperand<GPR64, "printPostIncOperand<16>">;
def GPR64pi24 : RegisterOperand<GPR64, "printPostIncOperand<24>">;
def GPR64pi32 : RegisterOperand<GPR64, "printPostIncOperand<32>">;
def GPR64pi48 : RegisterOperand<GPR64, "printPostIncOperand<48>">;
def GPR64pi64 : RegisterOperand<GPR64, "printPostIncOperand<64>">;
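// For example, with GPR64pi16 as the post-index operand:
//   ld1 { v0.16b }, [x0], x2    // generic register post-increment
//   ld1 { v0.16b }, [x0], #16   // Rm=31, printed as the byte count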

// Condition code regclass.
def CCR : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
  let CopyCost = -1;  // Don't allow copying of status registers.

  // CCR is not allocatable.
  let isAllocatable = 0;
}

//===----------------------------------------------------------------------===//
// Floating Point Scalar Registers
//===----------------------------------------------------------------------===//

def B0    : AArch64Reg<0,   "b0">, DwarfRegNum<[64]>;
def B1    : AArch64Reg<1,   "b1">, DwarfRegNum<[65]>;
def B2    : AArch64Reg<2,   "b2">, DwarfRegNum<[66]>;
def B3    : AArch64Reg<3,   "b3">, DwarfRegNum<[67]>;
def B4    : AArch64Reg<4,   "b4">, DwarfRegNum<[68]>;
def B5    : AArch64Reg<5,   "b5">, DwarfRegNum<[69]>;
def B6    : AArch64Reg<6,   "b6">, DwarfRegNum<[70]>;
def B7    : AArch64Reg<7,   "b7">, DwarfRegNum<[71]>;
def B8    : AArch64Reg<8,   "b8">, DwarfRegNum<[72]>;
def B9    : AArch64Reg<9,   "b9">, DwarfRegNum<[73]>;
def B10   : AArch64Reg<10, "b10">, DwarfRegNum<[74]>;
def B11   : AArch64Reg<11, "b11">, DwarfRegNum<[75]>;
def B12   : AArch64Reg<12, "b12">, DwarfRegNum<[76]>;
def B13   : AArch64Reg<13, "b13">, DwarfRegNum<[77]>;
def B14   : AArch64Reg<14, "b14">, DwarfRegNum<[78]>;
def B15   : AArch64Reg<15, "b15">, DwarfRegNum<[79]>;
def B16   : AArch64Reg<16, "b16">, DwarfRegNum<[80]>;
def B17   : AArch64Reg<17, "b17">, DwarfRegNum<[81]>;
def B18   : AArch64Reg<18, "b18">, DwarfRegNum<[82]>;
def B19   : AArch64Reg<19, "b19">, DwarfRegNum<[83]>;
def B20   : AArch64Reg<20, "b20">, DwarfRegNum<[84]>;
def B21   : AArch64Reg<21, "b21">, DwarfRegNum<[85]>;
def B22   : AArch64Reg<22, "b22">, DwarfRegNum<[86]>;
def B23   : AArch64Reg<23, "b23">, DwarfRegNum<[87]>;
def B24   : AArch64Reg<24, "b24">, DwarfRegNum<[88]>;
def B25   : AArch64Reg<25, "b25">, DwarfRegNum<[89]>;
def B26   : AArch64Reg<26, "b26">, DwarfRegNum<[90]>;
def B27   : AArch64Reg<27, "b27">, DwarfRegNum<[91]>;
def B28   : AArch64Reg<28, "b28">, DwarfRegNum<[92]>;
def B29   : AArch64Reg<29, "b29">, DwarfRegNum<[93]>;
def B30   : AArch64Reg<30, "b30">, DwarfRegNum<[94]>;
def B31   : AArch64Reg<31, "b31">, DwarfRegNum<[95]>;

let SubRegIndices = [bsub] in {
def H0    : AArch64Reg<0,   "h0", [B0]>, DwarfRegAlias<B0>;
def H1    : AArch64Reg<1,   "h1", [B1]>, DwarfRegAlias<B1>;
def H2    : AArch64Reg<2,   "h2", [B2]>, DwarfRegAlias<B2>;
def H3    : AArch64Reg<3,   "h3", [B3]>, DwarfRegAlias<B3>;
def H4    : AArch64Reg<4,   "h4", [B4]>, DwarfRegAlias<B4>;
def H5    : AArch64Reg<5,   "h5", [B5]>, DwarfRegAlias<B5>;
def H6    : AArch64Reg<6,   "h6", [B6]>, DwarfRegAlias<B6>;
def H7    : AArch64Reg<7,   "h7", [B7]>, DwarfRegAlias<B7>;
def H8    : AArch64Reg<8,   "h8", [B8]>, DwarfRegAlias<B8>;
def H9    : AArch64Reg<9,   "h9", [B9]>, DwarfRegAlias<B9>;
def H10   : AArch64Reg<10, "h10", [B10]>, DwarfRegAlias<B10>;
def H11   : AArch64Reg<11, "h11", [B11]>, DwarfRegAlias<B11>;
def H12   : AArch64Reg<12, "h12", [B12]>, DwarfRegAlias<B12>;
def H13   : AArch64Reg<13, "h13", [B13]>, DwarfRegAlias<B13>;
def H14   : AArch64Reg<14, "h14", [B14]>, DwarfRegAlias<B14>;
def H15   : AArch64Reg<15, "h15", [B15]>, DwarfRegAlias<B15>;
def H16   : AArch64Reg<16, "h16", [B16]>, DwarfRegAlias<B16>;
def H17   : AArch64Reg<17, "h17", [B17]>, DwarfRegAlias<B17>;
def H18   : AArch64Reg<18, "h18", [B18]>, DwarfRegAlias<B18>;
def H19   : AArch64Reg<19, "h19", [B19]>, DwarfRegAlias<B19>;
def H20   : AArch64Reg<20, "h20", [B20]>, DwarfRegAlias<B20>;
def H21   : AArch64Reg<21, "h21", [B21]>, DwarfRegAlias<B21>;
def H22   : AArch64Reg<22, "h22", [B22]>, DwarfRegAlias<B22>;
def H23   : AArch64Reg<23, "h23", [B23]>, DwarfRegAlias<B23>;
def H24   : AArch64Reg<24, "h24", [B24]>, DwarfRegAlias<B24>;
def H25   : AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>;
def H26   : AArch64Reg<26, "h26", [B26]>, DwarfRegAlias<B26>;
def H27   : AArch64Reg<27, "h27", [B27]>, DwarfRegAlias<B27>;
def H28   : AArch64Reg<28, "h28", [B28]>, DwarfRegAlias<B28>;
def H29   : AArch64Reg<29, "h29", [B29]>, DwarfRegAlias<B29>;
def H30   : AArch64Reg<30, "h30", [B30]>, DwarfRegAlias<B30>;
def H31   : AArch64Reg<31, "h31", [B31]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [hsub] in {
def S0    : AArch64Reg<0,   "s0", [H0]>, DwarfRegAlias<B0>;
def S1    : AArch64Reg<1,   "s1", [H1]>, DwarfRegAlias<B1>;
def S2    : AArch64Reg<2,   "s2", [H2]>, DwarfRegAlias<B2>;
def S3    : AArch64Reg<3,   "s3", [H3]>, DwarfRegAlias<B3>;
def S4    : AArch64Reg<4,   "s4", [H4]>, DwarfRegAlias<B4>;
def S5    : AArch64Reg<5,   "s5", [H5]>, DwarfRegAlias<B5>;
def S6    : AArch64Reg<6,   "s6", [H6]>, DwarfRegAlias<B6>;
def S7    : AArch64Reg<7,   "s7", [H7]>, DwarfRegAlias<B7>;
def S8    : AArch64Reg<8,   "s8", [H8]>, DwarfRegAlias<B8>;
def S9    : AArch64Reg<9,   "s9", [H9]>, DwarfRegAlias<B9>;
def S10   : AArch64Reg<10, "s10", [H10]>, DwarfRegAlias<B10>;
def S11   : AArch64Reg<11, "s11", [H11]>, DwarfRegAlias<B11>;
def S12   : AArch64Reg<12, "s12", [H12]>, DwarfRegAlias<B12>;
def S13   : AArch64Reg<13, "s13", [H13]>, DwarfRegAlias<B13>;
def S14   : AArch64Reg<14, "s14", [H14]>, DwarfRegAlias<B14>;
def S15   : AArch64Reg<15, "s15", [H15]>, DwarfRegAlias<B15>;
def S16   : AArch64Reg<16, "s16", [H16]>, DwarfRegAlias<B16>;
def S17   : AArch64Reg<17, "s17", [H17]>, DwarfRegAlias<B17>;
def S18   : AArch64Reg<18, "s18", [H18]>, DwarfRegAlias<B18>;
def S19   : AArch64Reg<19, "s19", [H19]>, DwarfRegAlias<B19>;
def S20   : AArch64Reg<20, "s20", [H20]>, DwarfRegAlias<B20>;
def S21   : AArch64Reg<21, "s21", [H21]>, DwarfRegAlias<B21>;
def S22   : AArch64Reg<22, "s22", [H22]>, DwarfRegAlias<B22>;
def S23   : AArch64Reg<23, "s23", [H23]>, DwarfRegAlias<B23>;
def S24   : AArch64Reg<24, "s24", [H24]>, DwarfRegAlias<B24>;
def S25   : AArch64Reg<25, "s25", [H25]>, DwarfRegAlias<B25>;
def S26   : AArch64Reg<26, "s26", [H26]>, DwarfRegAlias<B26>;
def S27   : AArch64Reg<27, "s27", [H27]>, DwarfRegAlias<B27>;
def S28   : AArch64Reg<28, "s28", [H28]>, DwarfRegAlias<B28>;
def S29   : AArch64Reg<29, "s29", [H29]>, DwarfRegAlias<B29>;
def S30   : AArch64Reg<30, "s30", [H30]>, DwarfRegAlias<B30>;
def S31   : AArch64Reg<31, "s31", [H31]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [ssub], RegAltNameIndices = [vreg, vlist1] in {
def D0    : AArch64Reg<0,   "d0", [S0], ["v0", ""]>, DwarfRegAlias<B0>;
def D1    : AArch64Reg<1,   "d1", [S1], ["v1", ""]>, DwarfRegAlias<B1>;
def D2    : AArch64Reg<2,   "d2", [S2], ["v2", ""]>, DwarfRegAlias<B2>;
def D3    : AArch64Reg<3,   "d3", [S3], ["v3", ""]>, DwarfRegAlias<B3>;
def D4    : AArch64Reg<4,   "d4", [S4], ["v4", ""]>, DwarfRegAlias<B4>;
def D5    : AArch64Reg<5,   "d5", [S5], ["v5", ""]>, DwarfRegAlias<B5>;
def D6    : AArch64Reg<6,   "d6", [S6], ["v6", ""]>, DwarfRegAlias<B6>;
def D7    : AArch64Reg<7,   "d7", [S7], ["v7", ""]>, DwarfRegAlias<B7>;
def D8    : AArch64Reg<8,   "d8", [S8], ["v8", ""]>, DwarfRegAlias<B8>;
def D9    : AArch64Reg<9,   "d9", [S9], ["v9", ""]>, DwarfRegAlias<B9>;
def D10   : AArch64Reg<10, "d10", [S10], ["v10", ""]>, DwarfRegAlias<B10>;
def D11   : AArch64Reg<11, "d11", [S11], ["v11", ""]>, DwarfRegAlias<B11>;
def D12   : AArch64Reg<12, "d12", [S12], ["v12", ""]>, DwarfRegAlias<B12>;
def D13   : AArch64Reg<13, "d13", [S13], ["v13", ""]>, DwarfRegAlias<B13>;
def D14   : AArch64Reg<14, "d14", [S14], ["v14", ""]>, DwarfRegAlias<B14>;
def D15   : AArch64Reg<15, "d15", [S15], ["v15", ""]>, DwarfRegAlias<B15>;
def D16   : AArch64Reg<16, "d16", [S16], ["v16", ""]>, DwarfRegAlias<B16>;
def D17   : AArch64Reg<17, "d17", [S17], ["v17", ""]>, DwarfRegAlias<B17>;
def D18   : AArch64Reg<18, "d18", [S18], ["v18", ""]>, DwarfRegAlias<B18>;
def D19   : AArch64Reg<19, "d19", [S19], ["v19", ""]>, DwarfRegAlias<B19>;
def D20   : AArch64Reg<20, "d20", [S20], ["v20", ""]>, DwarfRegAlias<B20>;
def D21   : AArch64Reg<21, "d21", [S21], ["v21", ""]>, DwarfRegAlias<B21>;
def D22   : AArch64Reg<22, "d22", [S22], ["v22", ""]>, DwarfRegAlias<B22>;
def D23   : AArch64Reg<23, "d23", [S23], ["v23", ""]>, DwarfRegAlias<B23>;
def D24   : AArch64Reg<24, "d24", [S24], ["v24", ""]>, DwarfRegAlias<B24>;
def D25   : AArch64Reg<25, "d25", [S25], ["v25", ""]>, DwarfRegAlias<B25>;
def D26   : AArch64Reg<26, "d26", [S26], ["v26", ""]>, DwarfRegAlias<B26>;
def D27   : AArch64Reg<27, "d27", [S27], ["v27", ""]>, DwarfRegAlias<B27>;
def D28   : AArch64Reg<28, "d28", [S28], ["v28", ""]>, DwarfRegAlias<B28>;
def D29   : AArch64Reg<29, "d29", [S29], ["v29", ""]>, DwarfRegAlias<B29>;
def D30   : AArch64Reg<30, "d30", [S30], ["v30", ""]>, DwarfRegAlias<B30>;
def D31   : AArch64Reg<31, "d31", [S31], ["v31", ""]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [dsub], RegAltNameIndices = [vreg, vlist1] in {
def Q0    : AArch64Reg<0,   "q0", [D0], ["v0", ""]>, DwarfRegAlias<B0>;
def Q1    : AArch64Reg<1,   "q1", [D1], ["v1", ""]>, DwarfRegAlias<B1>;
def Q2    : AArch64Reg<2,   "q2", [D2], ["v2", ""]>, DwarfRegAlias<B2>;
def Q3    : AArch64Reg<3,   "q3", [D3], ["v3", ""]>, DwarfRegAlias<B3>;
def Q4    : AArch64Reg<4,   "q4", [D4], ["v4", ""]>, DwarfRegAlias<B4>;
def Q5    : AArch64Reg<5,   "q5", [D5], ["v5", ""]>, DwarfRegAlias<B5>;
def Q6    : AArch64Reg<6,   "q6", [D6], ["v6", ""]>, DwarfRegAlias<B6>;
def Q7    : AArch64Reg<7,   "q7", [D7], ["v7", ""]>, DwarfRegAlias<B7>;
def Q8    : AArch64Reg<8,   "q8", [D8], ["v8", ""]>, DwarfRegAlias<B8>;
def Q9    : AArch64Reg<9,   "q9", [D9], ["v9", ""]>, DwarfRegAlias<B9>;
def Q10   : AArch64Reg<10, "q10", [D10], ["v10", ""]>, DwarfRegAlias<B10>;
def Q11   : AArch64Reg<11, "q11", [D11], ["v11", ""]>, DwarfRegAlias<B11>;
def Q12   : AArch64Reg<12, "q12", [D12], ["v12", ""]>, DwarfRegAlias<B12>;
def Q13   : AArch64Reg<13, "q13", [D13], ["v13", ""]>, DwarfRegAlias<B13>;
def Q14   : AArch64Reg<14, "q14", [D14], ["v14", ""]>, DwarfRegAlias<B14>;
def Q15   : AArch64Reg<15, "q15", [D15], ["v15", ""]>, DwarfRegAlias<B15>;
def Q16   : AArch64Reg<16, "q16", [D16], ["v16", ""]>, DwarfRegAlias<B16>;
def Q17   : AArch64Reg<17, "q17", [D17], ["v17", ""]>, DwarfRegAlias<B17>;
def Q18   : AArch64Reg<18, "q18", [D18], ["v18", ""]>, DwarfRegAlias<B18>;
def Q19   : AArch64Reg<19, "q19", [D19], ["v19", ""]>, DwarfRegAlias<B19>;
def Q20   : AArch64Reg<20, "q20", [D20], ["v20", ""]>, DwarfRegAlias<B20>;
def Q21   : AArch64Reg<21, "q21", [D21], ["v21", ""]>, DwarfRegAlias<B21>;
def Q22   : AArch64Reg<22, "q22", [D22], ["v22", ""]>, DwarfRegAlias<B22>;
def Q23   : AArch64Reg<23, "q23", [D23], ["v23", ""]>, DwarfRegAlias<B23>;
def Q24   : AArch64Reg<24, "q24", [D24], ["v24", ""]>, DwarfRegAlias<B24>;
def Q25   : AArch64Reg<25, "q25", [D25], ["v25", ""]>, DwarfRegAlias<B25>;
def Q26   : AArch64Reg<26, "q26", [D26], ["v26", ""]>, DwarfRegAlias<B26>;
def Q27   : AArch64Reg<27, "q27", [D27], ["v27", ""]>, DwarfRegAlias<B27>;
def Q28   : AArch64Reg<28, "q28", [D28], ["v28", ""]>, DwarfRegAlias<B28>;
def Q29   : AArch64Reg<29, "q29", [D29], ["v29", ""]>, DwarfRegAlias<B29>;
def Q30   : AArch64Reg<30, "q30", [D30], ["v30", ""]>, DwarfRegAlias<B30>;
def Q31   : AArch64Reg<31, "q31", [D31], ["v31", ""]>, DwarfRegAlias<B31>;
}

def FPR8  : RegisterClass<"AArch64", [i8], 8, (sequence "B%u", 0, 31)> {
  let Size = 8;
}
def FPR16 : RegisterClass<"AArch64", [f16, bf16, i16], 16, (sequence "H%u", 0, 31)> {
  let Size = 16;
}

def FPR16_lo : RegisterClass<"AArch64", [f16], 16, (trunc FPR16, 16)> {
  let Size = 16;
}
def FPR32 : RegisterClass<"AArch64", [f32, i32], 32,(sequence "S%u", 0, 31)>;
def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
                                      v1i64, v4f16, v4bf16],
                                     64, (sequence "D%u", 0, 31)>;
def FPR64_lo : RegisterClass<"AArch64",
                             [v8i8, v4i16, v2i32, v1i64, v4f16, v4bf16, v2f32,
                              v1f64],
                             64, (trunc FPR64, 16)>;

// We normalize 128-bit vectors to v2f64 for arg passing and such, so use
// that here. f128 is also included so an f128 value can live in an FPR128,
// although most f128 operations are lowered to libcalls.
def FPR128 : RegisterClass<"AArch64",
                           [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128,
                            v8f16, v8bf16],
                           128, (sequence "Q%u", 0, 31)>;

// The lower 16 vector registers.  Some instructions can only take registers
// in this range.
def FPR128_lo : RegisterClass<"AArch64",
                              [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16,
                               v8bf16],
                              128, (trunc FPR128, 16)>;

// The lower 8 vector registers.  Some instructions can only take registers
// in this range.
def FPR128_0to7 : RegisterClass<"AArch64",
                                [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16,
                                 v8bf16],
                                128, (trunc FPR128, 8)>;

// Pairs, triples, and quads of 64-bit vector registers.
def DSeqPairs : RegisterTuples<[dsub0, dsub1], [(rotl FPR64, 0), (rotl FPR64, 1)]>;
def DSeqTriples : RegisterTuples<[dsub0, dsub1, dsub2],
                                 [(rotl FPR64, 0), (rotl FPR64, 1),
                                  (rotl FPR64, 2)]>;
def DSeqQuads : RegisterTuples<[dsub0, dsub1, dsub2, dsub3],
                               [(rotl FPR64, 0), (rotl FPR64, 1),
                                (rotl FPR64, 2), (rotl FPR64, 3)]>;
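// Because the tuples are built from rotated copies of FPR64, they wrap
// around, e.g. DSeqPairs contains D0_D1, D1_D2, ..., D30_D31 and D31_D0.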
def DD   : RegisterClass<"AArch64", [untyped], 64, (add DSeqPairs)> {
  let Size = 128;
}
def DDD  : RegisterClass<"AArch64", [untyped], 64, (add DSeqTriples)> {
  let Size = 192;
}
def DDDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqQuads)> {
  let Size = 256;
}

// Pairs, triples, and quads of 128-bit vector registers.
def QSeqPairs : RegisterTuples<[qsub0, qsub1], [(rotl FPR128, 0), (rotl FPR128, 1)]>;
def QSeqTriples : RegisterTuples<[qsub0, qsub1, qsub2],
                                 [(rotl FPR128, 0), (rotl FPR128, 1),
                                  (rotl FPR128, 2)]>;
def QSeqQuads : RegisterTuples<[qsub0, qsub1, qsub2, qsub3],
                               [(rotl FPR128, 0), (rotl FPR128, 1),
                                (rotl FPR128, 2), (rotl FPR128, 3)]>;
def QQ   : RegisterClass<"AArch64", [untyped], 128, (add QSeqPairs)> {
  let Size = 256;
}
def QQQ  : RegisterClass<"AArch64", [untyped], 128, (add QSeqTriples)> {
  let Size = 384;
}
def QQQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqQuads)> {
  let Size = 512;
}


// Vector operand versions of the FP registers, with alternate name printing
// and assembler matching.
def VectorReg64AsmOperand : AsmOperandClass {
  let Name = "VectorReg64";
  let PredicateMethod = "isNeonVectorReg";
}
def VectorReg128AsmOperand : AsmOperandClass {
  let Name = "VectorReg128";
  let PredicateMethod = "isNeonVectorReg";
}

def V64  : RegisterOperand<FPR64, "printVRegOperand"> {
  let ParserMatchClass = VectorReg64AsmOperand;
}

def V128 : RegisterOperand<FPR128, "printVRegOperand"> {
  let ParserMatchClass = VectorReg128AsmOperand;
}

def VectorRegLoAsmOperand : AsmOperandClass {
  let Name = "VectorRegLo";
  let PredicateMethod = "isNeonVectorRegLo";
}
def V64_lo : RegisterOperand<FPR64_lo, "printVRegOperand"> {
  let ParserMatchClass = VectorRegLoAsmOperand;
}
def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
  let ParserMatchClass = VectorRegLoAsmOperand;
}

def VectorReg0to7AsmOperand : AsmOperandClass {
  let Name = "VectorReg0to7";
  let PredicateMethod = "isNeonVectorReg0to7";
}

def V128_0to7 : RegisterOperand<FPR128_0to7, "printVRegOperand"> {
  let ParserMatchClass = VectorReg0to7AsmOperand;
}

class TypedVecListAsmOperand<int count, string vecty, int lanes, int eltsize>
    : AsmOperandClass {
  let Name = "TypedVectorList" # count # "_" # lanes # eltsize;

  let PredicateMethod
      = "isTypedVectorList<RegKind::NeonVector, " # count # ", " # lanes # ", " # eltsize # ">";
  let RenderMethod = "addVectorListOperands<" # vecty  # ", "  # count # ">";
}

class TypedVecListRegOperand<RegisterClass Reg, int lanes, string eltsize>
    : RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
                                                   # eltsize # "'>">;

multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
  // Lists with implicit element types (the type is normally carried by the
  // instruction instead), e.g. { v0, v1 }.
  def _64AsmOperand : AsmOperandClass {
    let Name = NAME # "64";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_DReg, " # count # ">";
  }

  def "64" : RegisterOperand<Reg64, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_64AsmOperand");
  }

  def _128AsmOperand : AsmOperandClass {
    let Name = NAME # "128";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_QReg, " # count # ">";
  }

  def "128" : RegisterOperand<Reg128, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_128AsmOperand");
  }

  // 64-bit register lists with explicit type.

  // { v0.8b, v1.8b }
  def _8bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 8, 8>;
  def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
  }

  // { v0.4h, v1.4h }
  def _4hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 4, 16>;
  def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
  }

  // { v0.2s, v1.2s }
  def _2sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 2, 32>;
  def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
  }

  // { v0.1d, v1.1d }
  def _1dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 1, 64>;
  def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
  }

  // 128-bit register lists with explicit type

  // { v0.16b, v1.16b }
  def _16bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 16, 8>;
  def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
  }

  // { v0.8h, v1.8h }
  def _8hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 8, 16>;
  def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
  }

  // { v0.4s, v1.4s }
  def _4sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 4, 32>;
  def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
  }

  // { v0.2d, v1.2d }
  def _2dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 2, 64>;
  def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
  }

  // { v0.b, v1.b }
  def _bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 8>;
  def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
  }

  // { v0.h, v1.h }
  def _hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 16>;
  def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
  }

  // { v0.s, v1.s }
  def _sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 32>;
  def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
  }

  // { v0.d, v1.d }
  def _dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 64>;
  def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
  }


}

defm VecListOne   : VectorList<1, FPR64, FPR128>;
defm VecListTwo   : VectorList<2, DD,    QQ>;
defm VecListThree : VectorList<3, DDD,   QQQ>;
defm VecListFour  : VectorList<4, DDDD,  QQQQ>;
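// For example, VecListTwo128 matches an implicitly typed two-register list
// such as { v0, v1 }, while VecListTwo16b matches the explicitly typed form
// { v0.16b, v1.16b }; both are backed by the QQ register class.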

class FPRAsmOperand<string RC> : AsmOperandClass {
  let Name = "FPRAsmOperand" # RC;
  let PredicateMethod = "isGPR64<AArch64::" # RC # "RegClassID>";
  let RenderMethod = "addRegOperands";
}

// Register operand versions of the scalar FP registers.
def FPR8Op  : RegisterOperand<FPR8, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR8">;
}

def FPR16Op  : RegisterOperand<FPR16, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR16">;
}

def FPR16Op_lo  : RegisterOperand<FPR16_lo, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR16_lo">;
}

def FPR32Op  : RegisterOperand<FPR32, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR32">;
}

def FPR64Op  : RegisterOperand<FPR64, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR64">;
}

def FPR128Op : RegisterOperand<FPR128, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR128">;
}

//===----------------------------------------------------------------------===//
// ARMv8.1a atomic CASP register operands


def WSeqPairs : RegisterTuples<[sube32, subo32],
                               [(decimate (rotl GPR32, 0), 2),
                                (decimate (rotl GPR32, 1), 2)]>;
def XSeqPairs : RegisterTuples<[sube64, subo64],
                               [(decimate (rotl GPR64, 0), 2),
                                (decimate (rotl GPR64, 1), 2)]>;

def WSeqPairsClass   : RegisterClass<"AArch64", [untyped], 32,
                                     (add WSeqPairs)>{
  let Size = 64;
}
def XSeqPairsClass   : RegisterClass<"AArch64", [untyped], 64,
                                     (add XSeqPairs)>{
  let Size = 128;
}
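// CASP operates on an even/odd sequential register pair (which is what the
// "decimate ..., 2" above produces), e.g.:
//   casp x0, x1, x2, x3, [x4]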


let RenderMethod = "addRegOperands", ParserMethod="tryParseGPRSeqPair" in {
  def WSeqPairsAsmOperandClass : AsmOperandClass { let Name = "WSeqPair"; }
  def XSeqPairsAsmOperandClass : AsmOperandClass { let Name = "XSeqPair"; }
}

def WSeqPairClassOperand :
    RegisterOperand<WSeqPairsClass, "printGPRSeqPairsClassOperand<32>"> {
  let ParserMatchClass = WSeqPairsAsmOperandClass;
}
def XSeqPairClassOperand :
    RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
  let ParserMatchClass = XSeqPairsAsmOperandClass;
}
// Reuse the parsing and register numbers from XSeqPairs, but the encoding is
// different.
def MrrsMssrPairClassOperand :
    RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
  let ParserMatchClass = XSeqPairsAsmOperandClass;
}
def SyspXzrPairOperandMatcherClass : AsmOperandClass {
  let Name = "SyspXzrPair";
  let RenderMethod = "addSyspXzrPairOperand";
  let ParserMethod = "tryParseSyspXzrPair";
}
def SyspXzrPairOperand :
    RegisterOperand<GPR64, "printSyspXzrPair"> { // needed to allow alias with XZR operand
  let ParserMatchClass = SyspXzrPairOperandMatcherClass;
}



//===----- END: v8.1a atomic CASP register operands -----------------------===//

//===----------------------------------------------------------------------===//
// Armv8.7a accelerator extension register operands: 8 consecutive GPRs
// starting with an even one

let Namespace = "AArch64" in {
  foreach i = 0-7 in
    def "x8sub_"#i : SubRegIndex<64, !mul(64, i)>;
}

def Tuples8X : RegisterTuples<
  !foreach(i, [0,1,2,3,4,5,6,7], !cast<SubRegIndex>("x8sub_"#i)),
  !foreach(i, [0,1,2,3,4,5,6,7], (trunc (decimate (rotl GPR64, i), 2), 12))>;

def GPR64x8Class : RegisterClass<"AArch64", [i64x8], 512, (trunc Tuples8X, 12)> {
  let Size = 512;
}
def GPR64x8AsmOp : AsmOperandClass {
  let Name = "GPR64x8";
  let ParserMethod = "tryParseGPR64x8";
  let RenderMethod = "addRegOperands";
}
def GPR64x8 : RegisterOperand<GPR64x8Class, "printGPR64x8"> {
  let ParserMatchClass = GPR64x8AsmOp;
  let PrintMethod = "printGPR64x8";
}
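// For example (FEAT_LS64), the 64-byte load/store instructions take one of
// these tuples as their data operand, printed as its first register:
//   ld64b x0, [x13]    // loads x0..x7 from [x13]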

//===----- END: v8.7a accelerator extension register operands -------------===//

// SVE predicate-as-counter registers
  def PN0    : AArch64Reg<0,   "pn0">, DwarfRegNum<[48]>;
  def PN1    : AArch64Reg<1,   "pn1">, DwarfRegNum<[49]>;
  def PN2    : AArch64Reg<2,   "pn2">, DwarfRegNum<[50]>;
  def PN3    : AArch64Reg<3,   "pn3">, DwarfRegNum<[51]>;
  def PN4    : AArch64Reg<4,   "pn4">, DwarfRegNum<[52]>;
  def PN5    : AArch64Reg<5,   "pn5">, DwarfRegNum<[53]>;
  def PN6    : AArch64Reg<6,   "pn6">, DwarfRegNum<[54]>;
  def PN7    : AArch64Reg<7,   "pn7">, DwarfRegNum<[55]>;
  def PN8    : AArch64Reg<8,   "pn8">, DwarfRegNum<[56]>;
  def PN9    : AArch64Reg<9,   "pn9">, DwarfRegNum<[57]>;
  def PN10   : AArch64Reg<10, "pn10">, DwarfRegNum<[58]>;
  def PN11   : AArch64Reg<11, "pn11">, DwarfRegNum<[59]>;
  def PN12   : AArch64Reg<12, "pn12">, DwarfRegNum<[60]>;
  def PN13   : AArch64Reg<13, "pn13">, DwarfRegNum<[61]>;
  def PN14   : AArch64Reg<14, "pn14">, DwarfRegNum<[62]>;
  def PN15   : AArch64Reg<15, "pn15">, DwarfRegNum<[63]>;

// SVE predicate registers
let SubRegIndices = [psub] in {
  def P0    : AArch64Reg<0,   "p0", [PN0]>, DwarfRegAlias<PN0>;
  def P1    : AArch64Reg<1,   "p1", [PN1]>, DwarfRegAlias<PN1>;
  def P2    : AArch64Reg<2,   "p2", [PN2]>, DwarfRegAlias<PN2>;
  def P3    : AArch64Reg<3,   "p3", [PN3]>, DwarfRegAlias<PN3>;
  def P4    : AArch64Reg<4,   "p4", [PN4]>, DwarfRegAlias<PN4>;
  def P5    : AArch64Reg<5,   "p5", [PN5]>, DwarfRegAlias<PN5>;
  def P6    : AArch64Reg<6,   "p6", [PN6]>, DwarfRegAlias<PN6>;
  def P7    : AArch64Reg<7,   "p7", [PN7]>, DwarfRegAlias<PN7>;
  def P8    : AArch64Reg<8,   "p8", [PN8]>, DwarfRegAlias<PN8>;
  def P9    : AArch64Reg<9,   "p9", [PN9]>, DwarfRegAlias<PN9>;
  def P10   : AArch64Reg<10, "p10", [PN10]>, DwarfRegAlias<PN10>;
  def P11   : AArch64Reg<11, "p11", [PN11]>, DwarfRegAlias<PN11>;
  def P12   : AArch64Reg<12, "p12", [PN12]>, DwarfRegAlias<PN12>;
  def P13   : AArch64Reg<13, "p13", [PN13]>, DwarfRegAlias<PN13>;
  def P14   : AArch64Reg<14, "p14", [PN14]>, DwarfRegAlias<PN14>;
  def P15   : AArch64Reg<15, "p15", [PN15]>, DwarfRegAlias<PN15>;
}

// SVE variable-size vector registers
let SubRegIndices = [zsub] in {
def Z0    : AArch64Reg<0,   "z0",  [Q0]>, DwarfRegNum<[96]>;
def Z1    : AArch64Reg<1,   "z1",  [Q1]>, DwarfRegNum<[97]>;
def Z2    : AArch64Reg<2,   "z2",  [Q2]>, DwarfRegNum<[98]>;
def Z3    : AArch64Reg<3,   "z3",  [Q3]>, DwarfRegNum<[99]>;
def Z4    : AArch64Reg<4,   "z4",  [Q4]>, DwarfRegNum<[100]>;
def Z5    : AArch64Reg<5,   "z5",  [Q5]>, DwarfRegNum<[101]>;
def Z6    : AArch64Reg<6,   "z6",  [Q6]>, DwarfRegNum<[102]>;
def Z7    : AArch64Reg<7,   "z7",  [Q7]>, DwarfRegNum<[103]>;
def Z8    : AArch64Reg<8,   "z8",  [Q8]>, DwarfRegNum<[104]>;
def Z9    : AArch64Reg<9,   "z9",  [Q9]>, DwarfRegNum<[105]>;
def Z10   : AArch64Reg<10, "z10", [Q10]>, DwarfRegNum<[106]>;
def Z11   : AArch64Reg<11, "z11", [Q11]>, DwarfRegNum<[107]>;
def Z12   : AArch64Reg<12, "z12", [Q12]>, DwarfRegNum<[108]>;
def Z13   : AArch64Reg<13, "z13", [Q13]>, DwarfRegNum<[109]>;
def Z14   : AArch64Reg<14, "z14", [Q14]>, DwarfRegNum<[110]>;
def Z15   : AArch64Reg<15, "z15", [Q15]>, DwarfRegNum<[111]>;
def Z16   : AArch64Reg<16, "z16", [Q16]>, DwarfRegNum<[112]>;
def Z17   : AArch64Reg<17, "z17", [Q17]>, DwarfRegNum<[113]>;
def Z18   : AArch64Reg<18, "z18", [Q18]>, DwarfRegNum<[114]>;
def Z19   : AArch64Reg<19, "z19", [Q19]>, DwarfRegNum<[115]>;
def Z20   : AArch64Reg<20, "z20", [Q20]>, DwarfRegNum<[116]>;
def Z21   : AArch64Reg<21, "z21", [Q21]>, DwarfRegNum<[117]>;
def Z22   : AArch64Reg<22, "z22", [Q22]>, DwarfRegNum<[118]>;
def Z23   : AArch64Reg<23, "z23", [Q23]>, DwarfRegNum<[119]>;
def Z24   : AArch64Reg<24, "z24", [Q24]>, DwarfRegNum<[120]>;
def Z25   : AArch64Reg<25, "z25", [Q25]>, DwarfRegNum<[121]>;
def Z26   : AArch64Reg<26, "z26", [Q26]>, DwarfRegNum<[122]>;
def Z27   : AArch64Reg<27, "z27", [Q27]>, DwarfRegNum<[123]>;
def Z28   : AArch64Reg<28, "z28", [Q28]>, DwarfRegNum<[124]>;
def Z29   : AArch64Reg<29, "z29", [Q29]>, DwarfRegNum<[125]>;
def Z30   : AArch64Reg<30, "z30", [Q30]>, DwarfRegNum<[126]>;
def Z31   : AArch64Reg<31, "z31", [Q31]>, DwarfRegNum<[127]>;
}

// Enum describing the element size for destructive
// operations.
class ElementSizeEnum<bits<3> val> {
  bits<3> Value = val;
}

def ElementSizeNone : ElementSizeEnum<0>;
def ElementSizeB    : ElementSizeEnum<1>;
def ElementSizeH    : ElementSizeEnum<2>;
def ElementSizeS    : ElementSizeEnum<3>;
def ElementSizeD    : ElementSizeEnum<4>;
def ElementSizeQ    : ElementSizeEnum<5>;  // Unused

class SVERegOp <string Suffix, AsmOperandClass C,
                ElementSizeEnum Size,
                RegisterClass RC> : RegisterOperand<RC> {
  ElementSizeEnum ElementSize;

  let ElementSize = Size;
  let PrintMethod = !if(!eq(Suffix, ""),
                        "printSVERegOp<>",
                        "printSVERegOp<'" # Suffix # "'>");
  let ParserMatchClass = C;
}
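// For example, ZPR8 below instantiates this with Suffix = "b", so its
// PrintMethod resolves to "printSVERegOp<'b'>" and operands print as "z0.b";
// the suffix-less forms (ZPRAny, PPRAny) print just "z0"/"p0".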

class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}

//******************************************************************************

// SVE predicate register classes.
class PPRClass<int firstreg, int lastreg> : RegisterClass<
                                  "AArch64",
                                  [ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1 ], 16,
                                  (sequence "P%u", firstreg, lastreg)> {
  let Size = 16;
}

def PPR    : PPRClass<0, 15>;
def PPR_3b : PPRClass<0, 7>; // Restricted 3-bit SVE predicate register class.
def PPR_p8to15 : PPRClass<8, 15>;

class PPRAsmOperand <string name, string RegClass, int Width>: AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEPredicateVectorRegOfWidth<"
                            # Width # ", " # "AArch64::" # RegClass # "RegClassID>";
  let DiagnosticType = "InvalidSVE" # name # "Reg";
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEPredicateVector<RegKind::SVEPredicateVector>";
}

def PPRAsmOpAny   : PPRAsmOperand<"PredicateAny",   "PPR",     0>;
def PPRAsmOp8     : PPRAsmOperand<"PredicateB",     "PPR",     8>;
def PPRAsmOp16    : PPRAsmOperand<"PredicateH",     "PPR",    16>;
def PPRAsmOp32    : PPRAsmOperand<"PredicateS",     "PPR",    32>;
def PPRAsmOp64    : PPRAsmOperand<"PredicateD",     "PPR",    64>;
def PPRAsmOp3bAny : PPRAsmOperand<"Predicate3bAny", "PPR_3b",  0>;

class PPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}

def PPRAny   : PPRRegOp<"",  PPRAsmOpAny,   ElementSizeNone, PPR>;
def PPR8     : PPRRegOp<"b", PPRAsmOp8,     ElementSizeB,    PPR>;
def PPR16    : PPRRegOp<"h", PPRAsmOp16,    ElementSizeH,    PPR>;
def PPR32    : PPRRegOp<"s", PPRAsmOp32,    ElementSizeS,    PPR>;
def PPR64    : PPRRegOp<"d", PPRAsmOp64,    ElementSizeD,    PPR>;
def PPR3bAny : PPRRegOp<"",  PPRAsmOp3bAny, ElementSizeNone, PPR_3b>;

class PNRClass<int firstreg, int lastreg> : RegisterClass<
                                  "AArch64",
                                  [ aarch64svcount ], 16,
                                  (sequence "PN%u", firstreg, lastreg)> {
  let Size = 16;
}

def PNR        : PNRClass<0, 15>;
def PNR_3b     : PNRClass<0, 7>;
def PNR_p8to15 : PNRClass<8, 15>;

// SVE predicate-as-counter operand
class PNRAsmOperand<string name, string RegClass, int Width>: AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEPredicateAsCounterRegOfWidth<"
                            # Width # ", " # "AArch64::"
                            # RegClass # "RegClassID>";
  let DiagnosticType = "InvalidSVE" # name # "Reg";
  let RenderMethod = "addRegOperands";
  let ParserMethod   = "tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>";
}

let RenderMethod = "addPNRasPPRRegOperands" in {
  def PNRasPPROpAny : PNRAsmOperand<"PNRasPPRPredicateAny", "PNR",  0>;
  def PNRasPPROp8   : PNRAsmOperand<"PNRasPPRPredicateB",   "PNR",  8>;
}

class PNRasPPRRegOp<string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}

def PNRasPPRAny : PNRasPPRRegOp<"",  PNRasPPROpAny, ElementSizeNone, PPR>;
def PNRasPPR8   : PNRasPPRRegOp<"b", PNRasPPROp8,   ElementSizeB,    PPR>;

def PNRAsmOpAny: PNRAsmOperand<"PNPredicateAny", "PNR", 0>;
def PNRAsmOp8  : PNRAsmOperand<"PNPredicateB",   "PNR", 8>;
def PNRAsmOp16 : PNRAsmOperand<"PNPredicateH",   "PNR", 16>;
def PNRAsmOp32 : PNRAsmOperand<"PNPredicateS",   "PNR", 32>;
def PNRAsmOp64 : PNRAsmOperand<"PNPredicateD",   "PNR", 64>;

class PNRRegOp<string Suffix, AsmOperandClass C, int Size, RegisterClass RC>
    : SVERegOp<Suffix, C, ElementSizeNone, RC> {
  let PrintMethod = "printPredicateAsCounter<" # Size # ">";
}
def PNRAny : PNRRegOp<"",  PNRAsmOpAny, 0,  PNR>;
def PNR8   : PNRRegOp<"b", PNRAsmOp8,   8,  PNR>;
def PNR16  : PNRRegOp<"h", PNRAsmOp16,  16, PNR>;
def PNR32  : PNRRegOp<"s", PNRAsmOp32,  32, PNR>;
def PNR64  : PNRRegOp<"d", PNRAsmOp64,  64, PNR>;

def PNRAsmAny_p8to15  : PNRAsmOperand<"PNPredicateAny_p8to15", "PNR_p8to15", 0>;
def PNRAsmOp8_p8to15  : PNRAsmOperand<"PNPredicateB_p8to15",   "PNR_p8to15", 8>;
def PNRAsmOp16_p8to15 : PNRAsmOperand<"PNPredicateH_p8to15",   "PNR_p8to15", 16>;
def PNRAsmOp32_p8to15 : PNRAsmOperand<"PNPredicateS_p8to15",   "PNR_p8to15", 32>;
def PNRAsmOp64_p8to15 : PNRAsmOperand<"PNPredicateD_p8to15",   "PNR_p8to15", 64>;

class PNRP8to15RegOp<string Suffix, AsmOperandClass C, int Width, RegisterClass RC>
    : SVERegOp<Suffix, C, ElementSizeNone, RC> {
  let PrintMethod   = "printPredicateAsCounter<" # Width # ">";
  let EncoderMethod = "EncodePNR_p8to15";
  let DecoderMethod = "DecodePNR_p8to15RegisterClass";
}

def PNRAny_p8to15 : PNRP8to15RegOp<"",  PNRAsmAny_p8to15,  0,  PNR_p8to15>;
def PNR8_p8to15   : PNRP8to15RegOp<"b", PNRAsmOp8_p8to15,  8,  PNR_p8to15>;
def PNR16_p8to15  : PNRP8to15RegOp<"h", PNRAsmOp16_p8to15, 16, PNR_p8to15>;
def PNR32_p8to15  : PNRP8to15RegOp<"s", PNRAsmOp32_p8to15, 32, PNR_p8to15>;
def PNR64_p8to15  : PNRP8to15RegOp<"d", PNRAsmOp64_p8to15, 64, PNR_p8to15>;

let Namespace = "AArch64" in {
  def psub0 : SubRegIndex<16, -1>;
  def psub1 : SubRegIndex<16, -1>;
}

// Pairs of SVE predicate vector registers.
def PSeqPairs : RegisterTuples<[psub0, psub1], [(rotl PPR, 0), (rotl PPR, 1)]>;

def PPR2 : RegisterClass<"AArch64", [untyped], 16, (add PSeqPairs)> {
  let Size = 32;
}

class PPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
  let Name = "SVEPredicateList" # NumRegs # "x" # ElementWidth;
  let ParserMethod = "tryParseVectorList<RegKind::SVEPredicateVector>";
  let PredicateMethod = "isTypedVectorList<RegKind::SVEPredicateVector, "
                            # NumRegs #", 0, "#ElementWidth #">";
  let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_PReg, "
                         # NumRegs #">";
}

def PP_b : RegisterOperand<PPR2, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = PPRVectorList<8, 2>;
}

def PP_h : RegisterOperand<PPR2, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = PPRVectorList<16, 2>;
}

def PP_s : RegisterOperand<PPR2, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = PPRVectorList<32, 2>;
}

def PP_d : RegisterOperand<PPR2, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = PPRVectorList<64, 2>;
}

// SVE2 multiple-of-2 multi-predicate-vector operands
def PPR2Mul2 : RegisterClass<"AArch64", [untyped], 16, (add (decimate PSeqPairs, 2))> {
  let Size = 32;
}

class PPRVectorListMul<int ElementWidth, int NumRegs> : PPRVectorList<ElementWidth, NumRegs> {
  let Name = "SVEPredicateListMul" # NumRegs # "x" # ElementWidth;
  let DiagnosticType = "Invalid" # Name;
  let PredicateMethod =
      "isTypedVectorListMultiple<RegKind::SVEPredicateVector, " # NumRegs # ", 0, "
                                                                # ElementWidth # ">";
}

let EncoderMethod = "EncodeRegAsMultipleOf<2>",
    DecoderMethod = "DecodePPR2Mul2RegisterClass" in {
  def PP_b_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = PPRVectorListMul<8, 2>;
  }

  def PP_h_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = PPRVectorListMul<16, 2>;
  }

  def PP_s_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = PPRVectorListMul<32, 2>;
  }

  def PP_d_mul_r : RegisterOperand<PPR2Mul2, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = PPRVectorListMul<64, 2>;
  }
}  // end let EncoderMethod/DecoderMethod
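// These "mul_r" list operands are for instructions that require the first
// predicate of the pair to be even-numbered (hence EncodeRegAsMultipleOf<2>),
// e.g. { p0.b, p1.b } or { p8.s, p9.s }.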


//******************************************************************************

// SVE vector register classes
class ZPRClass<int lastreg> : RegisterClass<"AArch64",
                                            [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
                                             nxv2f16, nxv4f16, nxv8f16,
                                             nxv2bf16, nxv4bf16, nxv8bf16,
                                             nxv2f32, nxv4f32,
                                             nxv2f64],
                                            128, (sequence "Z%u", 0, lastreg)> {
  let Size = 128;
}

def ZPR    : ZPRClass<31>;
def ZPR_4b : ZPRClass<15>; // Restricted 4-bit SVE vector register class.
def ZPR_3b : ZPRClass<7>;  // Restricted 3-bit SVE vector register class.
1080
1081class ZPRAsmOperand<string name, int Width, string RegClassSuffix = "">
1082    : AsmOperandClass {
1083  let Name = "SVE" # name # "Reg";
1084  let PredicateMethod = "isSVEDataVectorRegOfWidth<"
1085                            # Width # ", AArch64::ZPR"
1086                            # RegClassSuffix # "RegClassID>";
1087  let RenderMethod = "addRegOperands";
1088  let DiagnosticType = "InvalidZPR" # RegClassSuffix # Width;
1089  let ParserMethod = "tryParseSVEDataVector<false, "
1090                               # !if(!eq(Width, 0), "false", "true") # ">";
1091}
1092
1093def ZPRAsmOpAny : ZPRAsmOperand<"VectorAny", 0>;
1094def ZPRAsmOp8   : ZPRAsmOperand<"VectorB",   8>;
1095def ZPRAsmOp16  : ZPRAsmOperand<"VectorH",   16>;
1096def ZPRAsmOp32  : ZPRAsmOperand<"VectorS",   32>;
1097def ZPRAsmOp64  : ZPRAsmOperand<"VectorD",   64>;
1098def ZPRAsmOp128 : ZPRAsmOperand<"VectorQ",   128>;
1099
1100def ZPRAny  : ZPRRegOp<"",  ZPRAsmOpAny, ElementSizeNone, ZPR>;
1101def ZPR8    : ZPRRegOp<"b", ZPRAsmOp8,   ElementSizeB, ZPR>;
1102def ZPR16   : ZPRRegOp<"h", ZPRAsmOp16,  ElementSizeH, ZPR>;
1103def ZPR32   : ZPRRegOp<"s", ZPRAsmOp32,  ElementSizeS, ZPR>;
1104def ZPR64   : ZPRRegOp<"d", ZPRAsmOp64,  ElementSizeD, ZPR>;
1105def ZPR128  : ZPRRegOp<"q", ZPRAsmOp128, ElementSizeQ, ZPR>;
1106
1107def ZPRAsmOp3b8   : ZPRAsmOperand<"Vector3bB", 8, "_3b">;
1108def ZPRAsmOp3b16  : ZPRAsmOperand<"Vector3bH", 16, "_3b">;
1109def ZPRAsmOp3b32  : ZPRAsmOperand<"Vector3bS", 32, "_3b">;
1110
1111def ZPR3b8  : ZPRRegOp<"b", ZPRAsmOp3b8,  ElementSizeB, ZPR_3b>;
1112def ZPR3b16 : ZPRRegOp<"h", ZPRAsmOp3b16, ElementSizeH, ZPR_3b>;
1113def ZPR3b32 : ZPRRegOp<"s", ZPRAsmOp3b32, ElementSizeS, ZPR_3b>;
1114
1115def ZPRAsmOp4b8   : ZPRAsmOperand<"Vector4bB", 8,  "_4b">;
1116def ZPRAsmOp4b16  : ZPRAsmOperand<"Vector4bH", 16, "_4b">;
1117def ZPRAsmOp4b32  : ZPRAsmOperand<"Vector4bS", 32, "_4b">;
1118def ZPRAsmOp4b64  : ZPRAsmOperand<"Vector4bD", 64, "_4b">;
1119
1120def ZPR4b8  : ZPRRegOp<"b", ZPRAsmOp4b8,  ElementSizeB, ZPR_4b>;
1121def ZPR4b16 : ZPRRegOp<"h", ZPRAsmOp4b16, ElementSizeH, ZPR_4b>;
1122def ZPR4b32 : ZPRRegOp<"s", ZPRAsmOp4b32, ElementSizeS, ZPR_4b>;
1123def ZPR4b64 : ZPRRegOp<"d", ZPRAsmOp4b64, ElementSizeD, ZPR_4b>;
1124
1125class FPRasZPR<int Width> : AsmOperandClass{
1126  let Name = "FPR" # Width # "asZPR";
1127  let PredicateMethod = "isFPRasZPR<AArch64::FPR" # Width # "RegClassID>";
1128  let RenderMethod = "addFPRasZPRRegOperands<" # Width # ">";
1129}
1130
1131class FPRasZPROperand<int Width> : RegisterOperand<ZPR> {
1132  let ParserMatchClass = FPRasZPR<Width>;
1133  let PrintMethod = "printZPRasFPR<" # Width # ">";
1134}
1135
1136def FPR8asZPR   : FPRasZPROperand<8>;
1137def FPR16asZPR  : FPRasZPROperand<16>;
1138def FPR32asZPR  : FPRasZPROperand<32>;
1139def FPR64asZPR  : FPRasZPROperand<64>;
1140def FPR128asZPR : FPRasZPROperand<128>;
1141
1142let Namespace = "AArch64" in {
1143  def zsub0 : SubRegIndex<128, -1>;
1144  def zsub1 : SubRegIndex<128, -1>;
1145  def zsub2 : SubRegIndex<128, -1>;
1146  def zsub3 : SubRegIndex<128, -1>;
1147}
1148
1149// Pairs, triples, and quads of SVE vector registers.
1150def ZSeqPairs   : RegisterTuples<[zsub0, zsub1], [(rotl ZPR, 0), (rotl ZPR, 1)]>;
1151def ZSeqTriples : RegisterTuples<[zsub0, zsub1, zsub2], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2)]>;
1152def ZSeqQuads   : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2), (rotl ZPR, 3)]>;
1153
1154def ZPR2   : RegisterClass<"AArch64", [untyped], 128, (add ZSeqPairs)>  {
1155  let Size = 256;
1156}
1157def ZPR3  : RegisterClass<"AArch64", [untyped], 128, (add ZSeqTriples)> {
1158  let Size = 384;
1159}
1160def ZPR4 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqQuads)> {
1161  let Size = 512;
1162}
1163
1164class ZPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
1165  let Name = "SVEVectorList" # NumRegs # ElementWidth;
1166  let ParserMethod = "tryParseVectorList<RegKind::SVEDataVector>";
1167  let PredicateMethod =
1168      "isTypedVectorList<RegKind::SVEDataVector, " #NumRegs #", 0, " #ElementWidth #">";
1169  let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_ZReg, " # NumRegs # ">";
1170}

def Z_b  : RegisterOperand<ZPR,  "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 1>;
}

def Z_h  : RegisterOperand<ZPR,  "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 1>;
}

def Z_s  : RegisterOperand<ZPR,  "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 1>;
}

def Z_d  : RegisterOperand<ZPR,  "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 1>;
}

def Z_q  : RegisterOperand<ZPR,  "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 1>;
}

def ZZ_b  : RegisterOperand<ZPR2, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 2>;
}

def ZZ_h  : RegisterOperand<ZPR2, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 2>;
}

def ZZ_s  : RegisterOperand<ZPR2, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 2>;
}

def ZZ_d  : RegisterOperand<ZPR2, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 2>;
}

def ZZ_q  : RegisterOperand<ZPR2, "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 2>;
}

def ZZZ_b  : RegisterOperand<ZPR3, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 3>;
}

def ZZZ_h  : RegisterOperand<ZPR3, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 3>;
}

def ZZZ_s  : RegisterOperand<ZPR3, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 3>;
}

def ZZZ_d  : RegisterOperand<ZPR3, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 3>;
}

def ZZZ_q  : RegisterOperand<ZPR3, "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 3>;
}

def ZZZZ_b : RegisterOperand<ZPR4, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 4>;
}

def ZZZZ_h : RegisterOperand<ZPR4, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 4>;
}

def ZZZZ_s : RegisterOperand<ZPR4, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 4>;
}

def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 4>;
}

def ZZZZ_q : RegisterOperand<ZPR4, "printTypedVectorList<0,'q'>"> {
  let ParserMatchClass = ZPRVectorList<128, 4>;
}

// SME2 multiple-of-2 or 4 multi-vector operands
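// These use 'decimate' so that only tuples whose first register number is a
// multiple of the tuple size are included: ZPR2Mul2 contains Z0_Z1, Z2_Z3, ...,
// Z30_Z31; ZPR4Mul4 contains Z0_Z1_Z2_Z3, Z4_Z5_Z6_Z7, ..., Z28_Z29_Z30_Z31.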
def ZPR2Mul2 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqPairs, 2))> {
  let Size = 256;
}

def ZPR4Mul4 : RegisterClass<"AArch64", [untyped], 128, (add (decimate ZSeqQuads, 4))> {
  let Size = 512;
}

class ZPRVectorListMul<int ElementWidth, int NumRegs> : ZPRVectorList<ElementWidth, NumRegs> {
  let Name = "SVEVectorListMul" # NumRegs # "x" # ElementWidth;
  let DiagnosticType = "Invalid" # Name;
  let PredicateMethod =
      "isTypedVectorListMultiple<RegKind::SVEDataVector, " # NumRegs # ", 0, "
                                                           # ElementWidth # ">";
}

let EncoderMethod = "EncodeRegAsMultipleOf<2>",
    DecoderMethod = "DecodeZPR2Mul2RegisterClass" in {
  def ZZ_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,0>"> {
    let ParserMatchClass = ZPRVectorListMul<0, 2>;
  }

  def ZZ_b_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = ZPRVectorListMul<8, 2>;
  }

  def ZZ_h_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = ZPRVectorListMul<16, 2>;
  }

  def ZZ_s_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = ZPRVectorListMul<32, 2>;
  }

  def ZZ_d_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = ZPRVectorListMul<64, 2>;
  }

  def ZZ_q_mul_r : RegisterOperand<ZPR2Mul2, "printTypedVectorList<0,'q'>"> {
    let ParserMatchClass = ZPRVectorListMul<128, 2>;
  }
} // end let EncoderMethod/DecoderMethod

let EncoderMethod = "EncodeRegAsMultipleOf<4>",
    DecoderMethod = "DecodeZPR4Mul4RegisterClass" in {
  def ZZZZ_b_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = ZPRVectorListMul<8, 4>;
  }

  def ZZZZ_h_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = ZPRVectorListMul<16, 4>;
  }

  def ZZZZ_s_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = ZPRVectorListMul<32, 4>;
  }

  def ZZZZ_d_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = ZPRVectorListMul<64, 4>;
  }

  def ZZZZ_q_mul_r : RegisterOperand<ZPR4Mul4, "printTypedVectorList<0,'q'>"> {
    let ParserMatchClass = ZPRVectorListMul<128, 4>;
  }
} // end let EncoderMethod/DecoderMethod

// SME2 strided multi-vector operands

// ZStridedPairs
//
// A group of two Z vectors with strided numbering consisting of:
//   Zn+0.T and Zn+8.T
// where n is in the range 0 to 7 and 16 to 23 inclusive, and T is one of B, H,
// S, or D.

// Z0_Z8, Z1_Z9, Z2_Z10, Z3_Z11, Z4_Z12, Z5_Z13, Z6_Z14, Z7_Z15
def ZStridedPairsLo : RegisterTuples<[zsub0, zsub1], [
  (trunc (rotl ZPR, 0), 8), (trunc (rotl ZPR, 8), 8)
]>;

// Z16_Z24, Z17_Z25, Z18_Z26, Z19_Z27, Z20_Z28, Z21_Z29, Z22_Z30, Z23_Z31
def ZStridedPairsHi : RegisterTuples<[zsub0, zsub1], [
  (trunc (rotl ZPR, 16), 8), (trunc (rotl ZPR, 24), 8)
]>;

// ZStridedQuads
//
// A group of four Z vectors with strided numbering consisting of:
//   Zn+0.T, Zn+4.T, Zn+8.T and Zn+12.T
// where n is in the range 0 to 3 and 16 to 19 inclusive, and T is one of B, H,
// S, or D.

// Z0_Z4_Z8_Z12, Z1_Z5_Z9_Z13, Z2_Z6_Z10_Z14, Z3_Z7_Z11_Z15
def ZStridedQuadsLo : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [
  (trunc (rotl ZPR, 0), 4), (trunc (rotl ZPR, 4), 4),
  (trunc (rotl ZPR, 8), 4), (trunc (rotl ZPR, 12), 4)
]>;
// Z16_Z20_Z24_Z28, Z17_Z21_Z25_Z29, Z18_Z22_Z26_Z30, Z19_Z23_Z27_Z31
def ZStridedQuadsHi : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [
  (trunc (rotl ZPR, 16), 4), (trunc (rotl ZPR, 20), 4),
  (trunc (rotl ZPR, 24), 4), (trunc (rotl ZPR, 28), 4)
]>;

def ZPR2Strided : RegisterClass<"AArch64", [untyped], 128,
                                (add ZStridedPairsLo, ZStridedPairsHi)>  {
  let Size = 256;
}
def ZPR4Strided : RegisterClass<"AArch64", [untyped], 128,
                                (add ZStridedQuadsLo, ZStridedQuadsHi)>  {
  let Size = 512;
}

def ZPR2StridedOrContiguous : RegisterClass<"AArch64", [untyped], 128,
                                (add ZStridedPairsLo, ZStridedPairsHi,
                                (decimate ZSeqPairs, 2))> {
  let Size = 256;
}

class ZPRVectorListStrided<int ElementWidth, int NumRegs, int Stride>
    : ZPRVectorList<ElementWidth, NumRegs> {
  let Name = "SVEVectorListStrided" # NumRegs # "x" # ElementWidth;
  let DiagnosticType = "Invalid" # Name;
  let PredicateMethod = "isTypedVectorListStrided<RegKind::SVEDataVector, "
                        # NumRegs # "," # Stride # "," # ElementWidth # ">";
  let RenderMethod = "addStridedVectorListOperands<" # NumRegs # ">";
}

let EncoderMethod = "EncodeZPR2StridedRegisterClass",
    DecoderMethod = "DecodeZPR2StridedRegisterClass" in {
  def ZZ_b_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = ZPRVectorListStrided<8, 2, 8>;
  }

  def ZZ_h_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = ZPRVectorListStrided<16, 2, 8>;
  }

  def ZZ_s_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = ZPRVectorListStrided<32, 2, 8>;
  }

  def ZZ_d_strided
      : RegisterOperand<ZPR2Strided, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = ZPRVectorListStrided<64, 2, 8>;
  }

  def ZZ_b_strided_and_contiguous
      : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'b'>">;
  def ZZ_h_strided_and_contiguous
      : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'h'>">;
  def ZZ_s_strided_and_contiguous
      : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'s'>">;
  def ZZ_d_strided_and_contiguous
      : RegisterOperand<ZPR2StridedOrContiguous, "printTypedVectorList<0,'d'>">;
}

def ZPR4StridedOrContiguous : RegisterClass<"AArch64", [untyped], 128,
                                (add ZStridedQuadsLo, ZStridedQuadsHi,
                                (decimate ZSeqQuads, 4))>  {
  let Size = 512;
}

let EncoderMethod = "EncodeZPR4StridedRegisterClass",
    DecoderMethod = "DecodeZPR4StridedRegisterClass" in {
  def ZZZZ_b_strided
      : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'b'>"> {
    let ParserMatchClass = ZPRVectorListStrided<8, 4, 4>;
  }

  def ZZZZ_h_strided
      : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'h'>"> {
    let ParserMatchClass = ZPRVectorListStrided<16, 4, 4>;
  }

  def ZZZZ_s_strided
      : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'s'>"> {
    let ParserMatchClass = ZPRVectorListStrided<32, 4, 4>;
  }

  def ZZZZ_d_strided
      : RegisterOperand<ZPR4Strided, "printTypedVectorList<0,'d'>"> {
    let ParserMatchClass = ZPRVectorListStrided<64, 4, 4>;
  }

  def ZZZZ_b_strided_and_contiguous
      : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'b'>">;
  def ZZZZ_h_strided_and_contiguous
      : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'h'>">;
  def ZZZZ_s_strided_and_contiguous
      : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'s'>">;
  def ZZZZ_d_strided_and_contiguous
      : RegisterOperand<ZPR4StridedOrContiguous, "printTypedVectorList<0,'d'>">;
}

class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale,
                          bit ScaleAlwaysSame = 0b0> : AsmOperandClass {
  let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale
                         # !if(ScaleAlwaysSame, "Only", "");

  let PredicateMethod = "isSVEDataVectorRegWithShiftExtend<"
                          # RegWidth # ", AArch64::ZPRRegClassID, "
                          # "AArch64_AM::" # ShiftExtend # ", "
                          # Scale # ", "
                          # !if(ScaleAlwaysSame, "true", "false")
                          # ">";
  let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale;
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEDataVector<true, true>";
}

class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr,
                               int RegWidth, int Scale, string Suffix = "">
    : RegisterOperand<ZPR> {
  let ParserMatchClass =
    !cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale # Suffix);
  let PrintMethod = "printRegWithShiftExtend<"
                          # !if(SignExtend, "true", "false") # ", "
                          # Scale # ", "
                          # !if(IsLSL, "'x'", "'w'") # ", "
                          # !if(!eq(RegWidth, 32), "'s'", "'d'") # ">";
}

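// Operands for a Z register used as a vector of offsets with an optional
// extend/shift in SVE gather/scatter addressing modes, e.g.
// [Xn, Zm.s, uxtw #2] or [Xn, Zm.d, lsl #3].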
foreach RegWidth = [32, 64] in {
  // UXTW(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtUXTW8Only : ZPRExtendAsmOperand<"UXTW", RegWidth, 8, 0b1>;
  def ZPR#RegWidth#AsmOpndExtUXTW8     : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtUXTW16    : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtUXTW32    : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtUXTW64    : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>;

  def ZPR#RegWidth#ExtUXTW8Only        : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8, "Only">;
  def ZPR#RegWidth#ExtUXTW8            : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>;
  def ZPR#RegWidth#ExtUXTW16           : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>;
  def ZPR#RegWidth#ExtUXTW32           : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>;
  def ZPR#RegWidth#ExtUXTW64           : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>;

  // SXTW(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtSXTW8Only : ZPRExtendAsmOperand<"SXTW", RegWidth, 8, 0b1>;
  def ZPR#RegWidth#AsmOpndExtSXTW8     : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtSXTW16    : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtSXTW32    : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtSXTW64    : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>;

  def ZPR#RegWidth#ExtSXTW8Only        : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8, "Only">;
  def ZPR#RegWidth#ExtSXTW8            : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>;
  def ZPR#RegWidth#ExtSXTW16           : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>;
  def ZPR#RegWidth#ExtSXTW32           : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>;
  def ZPR#RegWidth#ExtSXTW64           : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 64>;

  // LSL(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtLSL8      : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtLSL16     : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtLSL32     : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtLSL64     : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
  def ZPR#RegWidth#ExtLSL8             : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
  def ZPR#RegWidth#ExtLSL16            : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
  def ZPR#RegWidth#ExtLSL32            : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
  def ZPR#RegWidth#ExtLSL64            : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;
}

class GPR64ShiftExtendAsmOperand <string AsmOperandName, int Scale, string RegClass> : AsmOperandClass {
  let Name = AsmOperandName # Scale;
  let PredicateMethod = "isGPR64WithShiftExtend<AArch64::"#RegClass#"RegClassID, " # Scale # ">";
  let DiagnosticType = "Invalid" # AsmOperandName # Scale;
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseGPROperand<true>";
}

class GPR64ExtendRegisterOperand<string Name, int Scale, RegisterClass RegClass> : RegisterOperand<RegClass> {
  let ParserMatchClass = !cast<AsmOperandClass>(Name);
  let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x', 0>";
}

foreach Scale = [8, 16, 32, 64, 128] in {
  def GPR64shiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64shifted", Scale, "GPR64">;
  def GPR64shifted # Scale : GPR64ExtendRegisterOperand<"GPR64shiftedAsmOpnd" # Scale, Scale, GPR64>;

  def GPR64NoXZRshiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64NoXZRshifted", Scale, "GPR64common">;
  def GPR64NoXZRshifted # Scale : GPR64ExtendRegisterOperand<"GPR64NoXZRshiftedAsmOpnd" # Scale, Scale, GPR64common>;
}
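// The loop above defines GPR64shifted{8,16,32,64,128} and
// GPR64NoXZRshifted{8,16,32,64,128}, used for scaled 64-bit register offsets
// such as [Xn, Xm, lsl #3].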

// Accumulator array tiles.
def ZAQ0  : AArch64Reg<0,  "za0.q">;
def ZAQ1  : AArch64Reg<1,  "za1.q">;
def ZAQ2  : AArch64Reg<2,  "za2.q">;
def ZAQ3  : AArch64Reg<3,  "za3.q">;
def ZAQ4  : AArch64Reg<4,  "za4.q">;
def ZAQ5  : AArch64Reg<5,  "za5.q">;
def ZAQ6  : AArch64Reg<6,  "za6.q">;
def ZAQ7  : AArch64Reg<7,  "za7.q">;
def ZAQ8  : AArch64Reg<8,  "za8.q">;
def ZAQ9  : AArch64Reg<9,  "za9.q">;
def ZAQ10 : AArch64Reg<10, "za10.q">;
def ZAQ11 : AArch64Reg<11, "za11.q">;
def ZAQ12 : AArch64Reg<12, "za12.q">;
def ZAQ13 : AArch64Reg<13, "za13.q">;
def ZAQ14 : AArch64Reg<14, "za14.q">;
def ZAQ15 : AArch64Reg<15, "za15.q">;

let SubRegIndices = [zasubq0, zasubq1] in {
  def ZAD0 : AArch64Reg<0, "za0.d", [ZAQ0, ZAQ8]>;
  def ZAD1 : AArch64Reg<1, "za1.d", [ZAQ1, ZAQ9]>;
  def ZAD2 : AArch64Reg<2, "za2.d", [ZAQ2, ZAQ10]>;
  def ZAD3 : AArch64Reg<3, "za3.d", [ZAQ3, ZAQ11]>;
  def ZAD4 : AArch64Reg<4, "za4.d", [ZAQ4, ZAQ12]>;
  def ZAD5 : AArch64Reg<5, "za5.d", [ZAQ5, ZAQ13]>;
  def ZAD6 : AArch64Reg<6, "za6.d", [ZAQ6, ZAQ14]>;
  def ZAD7 : AArch64Reg<7, "za7.d", [ZAQ7, ZAQ15]>;
}

let SubRegIndices = [zasubd0, zasubd1] in {
  def ZAS0 : AArch64Reg<0, "za0.s", [ZAD0, ZAD4]>;
  def ZAS1 : AArch64Reg<1, "za1.s", [ZAD1, ZAD5]>;
  def ZAS2 : AArch64Reg<2, "za2.s", [ZAD2, ZAD6]>;
  def ZAS3 : AArch64Reg<3, "za3.s", [ZAD3, ZAD7]>;
}

let SubRegIndices = [zasubs0, zasubs1] in {
  def ZAH0 : AArch64Reg<0, "za0.h", [ZAS0, ZAS2]>;
  def ZAH1 : AArch64Reg<1, "za1.h", [ZAS1, ZAS3]>;
}

let SubRegIndices = [zasubh0, zasubh1] in {
  def ZAB0 : AArch64Reg<0, "za0.b", [ZAH0, ZAH1]>;
}

let SubRegIndices = [zasubb] in {
  def ZA : AArch64Reg<0, "za", [ZAB0]>;
}

def ZT0 : AArch64Reg<0, "zt0">;

// SME Register Classes

let isAllocatable = 0 in {
  // Accumulator array
  def MPR : RegisterClass<"AArch64", [untyped], 2048, (add ZA)> {
    let Size = 2048;
  }

  // Accumulator array as single tiles
  def MPR8    : RegisterClass<"AArch64", [untyped], 2048, (add (sequence "ZAB%u", 0, 0))> {
    let Size = 2048;
  }
  def MPR16   : RegisterClass<"AArch64", [untyped], 1024, (add (sequence "ZAH%u", 0, 1))> {
    let Size = 1024;
  }
  def MPR32   : RegisterClass<"AArch64", [untyped],  512, (add (sequence "ZAS%u", 0, 3))> {
    let Size = 512;
  }
  def MPR64   : RegisterClass<"AArch64", [untyped],  256, (add (sequence "ZAD%u", 0, 7))> {
    let Size = 256;
  }
  def MPR128  : RegisterClass<"AArch64", [untyped],  128, (add (sequence "ZAQ%u", 0, 15))> {
    let Size = 128;
  }
}

def ZTR : RegisterClass<"AArch64", [untyped], 512, (add ZT0)> {
  let Size = 512;
  let DiagnosticType = "InvalidLookupTable";
}

// SME Register Operands
// There are three types of SME matrix register operands:
// * Tiles:
//
//   These tiles make up the larger accumulator matrix. The tile representation
//   has an element type suffix, e.g. za0.b or za15.q, and can be any of the
//   registers:
//          ZAQ0..ZAQ15
//          ZAD0..ZAD7
//          ZAS0..ZAS3
//          ZAH0..ZAH1
//       or ZAB0
//
// * Tile vectors:
//
//   Their representation is similar to regular tiles, but they have an extra
//   'h' or 'v' to indicate how the vector at [reg+offset] is laid out in the
//   tile, horizontally or vertically.
//
//   e.g. za1h.h or za15v.q, which correspond to vectors in registers ZAH1 and
//   ZAQ15, respectively. The horizontal/vertical layout is more a property of
//   the instruction than of the asm-operand itself or its register. The
//   distinction is only required for parsing/printing the operand, since from
//   the compiler's perspective the whole tile is read/written.
//
// * Accumulator matrix:
//
//   This is the entire matrix accumulator register ZA (<=> ZAB0), printed as
//   'za'.
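//
// For example, in assembly these operands appear as:
//   za0.d  - a tile (operand TileOp64, register ZAD0)
//   za3v.s - a tile vector (operand TileVectorOpV32, register ZAS3), typically
//            followed by a slice selector such as [w12, 0]
//   za     - the full accumulator (operand MatrixOp, register ZA)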

//
// Tiles
//

class MatrixTileAsmOperand<string RC, int EltSize> : AsmOperandClass {
  let Name = "MatrixTile" # EltSize;
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::Tile" # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixTileOperand<int EltSize, int NumBitsForTile, RegisterClass RC>
    : RegisterOperand<RC> {
  let ParserMatchClass = MatrixTileAsmOperand<!cast<string>(RC), EltSize>;
  let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
  let PrintMethod = "printMatrixTile";
}

def TileOp16  : MatrixTileOperand<16, 1, MPR16>;
def TileOp32  : MatrixTileOperand<32, 2, MPR32>;
def TileOp64  : MatrixTileOperand<64, 3, MPR64>;

//
// Tile vectors (horizontal and vertical)
//

class MatrixTileVectorAsmOperand<string RC, int EltSize, int IsVertical>
    : AsmOperandClass {
  let Name = "MatrixTileVector" # !if(IsVertical, "V", "H") # EltSize;
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::"
                          # !if(IsVertical, "Col", "Row") # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixTileVectorOperand<int EltSize, int NumBitsForTile,
                              RegisterClass RC, int IsVertical>
    : RegisterOperand<RC> {
  let ParserMatchClass = MatrixTileVectorAsmOperand<!cast<string>(RC), EltSize,
                                                    IsVertical>;
  let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
  let PrintMethod = "printMatrixTileVector<" # IsVertical # ">";
}

def TileVectorOpH8   : MatrixTileVectorOperand<  8, 0, MPR8,   0>;
def TileVectorOpH16  : MatrixTileVectorOperand< 16, 1, MPR16,  0>;
def TileVectorOpH32  : MatrixTileVectorOperand< 32, 2, MPR32,  0>;
def TileVectorOpH64  : MatrixTileVectorOperand< 64, 3, MPR64,  0>;
def TileVectorOpH128 : MatrixTileVectorOperand<128, 4, MPR128, 0>;

def TileVectorOpV8   : MatrixTileVectorOperand<  8, 0, MPR8,   1>;
def TileVectorOpV16  : MatrixTileVectorOperand< 16, 1, MPR16,  1>;
def TileVectorOpV32  : MatrixTileVectorOperand< 32, 2, MPR32,  1>;
def TileVectorOpV64  : MatrixTileVectorOperand< 64, 3, MPR64,  1>;
def TileVectorOpV128 : MatrixTileVectorOperand<128, 4, MPR128, 1>;

//
// Accumulator matrix
//

class MatrixAsmOperand<string RC, int EltSize> : AsmOperandClass {
  let Name = "Matrix" # !if(EltSize, !cast<string>(EltSize), "");
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::Array" # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixOperand<RegisterClass RC, int EltSize> : RegisterOperand<RC> {
  let ParserMatchClass = MatrixAsmOperand<!cast<string>(RC), EltSize>;
  let PrintMethod = "printMatrix<" # EltSize # ">";
}

def MatrixOp : MatrixOperand<MPR, 0>;
// SME2 register operands and classes
def MatrixOp8 : MatrixOperand<MPR, 8>;
def MatrixOp16 : MatrixOperand<MPR, 16>;
def MatrixOp32 : MatrixOperand<MPR, 32>;
def MatrixOp64 : MatrixOperand<MPR, 64>;

class MatrixTileListAsmOperand : AsmOperandClass {
  let Name = "MatrixTileList";
  let ParserMethod = "tryParseMatrixTileList";
  let RenderMethod = "addMatrixTileListOperands";
  let PredicateMethod = "isMatrixTileList";
}

class MatrixTileListOperand : Operand<i8> {
  let ParserMatchClass = MatrixTileListAsmOperand<>;
  let DecoderMethod = "DecodeMatrixTileListRegisterClass";
  let EncoderMethod = "EncodeMatrixTileListRegisterClass";
  let PrintMethod = "printMatrixTileList";
}

def MatrixTileList : MatrixTileListOperand<>;

def MatrixIndexGPR32_8_11 : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 8, 11)> {
  let DiagnosticType = "InvalidMatrixIndexGPR32_8_11";
}
def MatrixIndexGPR32_12_15 : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 12, 15)> {
  let DiagnosticType = "InvalidMatrixIndexGPR32_12_15";
}
def MatrixIndexGPR32Op8_11 : RegisterOperand<MatrixIndexGPR32_8_11> {
  let EncoderMethod = "encodeMatrixIndexGPR32<AArch64::W8>";
}
def MatrixIndexGPR32Op12_15 : RegisterOperand<MatrixIndexGPR32_12_15> {
  let EncoderMethod = "encodeMatrixIndexGPR32<AArch64::W12>";
}

def SVCROperand : AsmOperandClass {
  let Name = "SVCR";
  let ParserMethod = "tryParseSVCR";
  let DiagnosticType = "Invalid" # Name;
}

def svcr_op : Operand<i32>, TImmLeaf<i32, [{
    return AArch64SVCR::lookupSVCRByEncoding(Imm) != nullptr;
  }]> {
  let ParserMatchClass = SVCROperand;
  let PrintMethod = "printSVCROp";
  let DecoderMethod = "DecodeSVCROp";
  let MCOperandPredicate = [{
    if (!MCOp.isImm())
      return false;
    return AArch64SVCR::lookupSVCRByEncoding(MCOp.getImm()) != nullptr;
  }];
}

//===----------------------------------------------------------------------===//
// Register categories.
//

def GeneralPurposeRegisters : RegisterCategory<[GPR64, GPR32]>;

def FIXED_REGS : RegisterClass<"AArch64", [i64], 64, (add FP, SP, VG, FFR)>;
def FixedRegisters : RegisterCategory<[CCR, FIXED_REGS]>;
