//=- AArch64RegisterInfo.td - Describe the AArch64 Registers -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//


class AArch64Reg<bits<16> enc, string n, list<Register> subregs = [],
               list<string> altNames = []>
        : Register<n, altNames> {
  let HWEncoding = enc;
  let Namespace = "AArch64";
  let SubRegs = subregs;
}

let Namespace = "AArch64" in {
  def sub_32 : SubRegIndex<32>;

  def bsub : SubRegIndex<8>;
  def hsub : SubRegIndex<16>;
  def ssub : SubRegIndex<32>;
  def dsub : SubRegIndex<64>;
  def sube32 : SubRegIndex<32>;
  def subo32 : SubRegIndex<32>;
  def sube64 : SubRegIndex<64>;
  def subo64 : SubRegIndex<64>;
  // SVE
  def zsub    : SubRegIndex<128>;
  // Note: zsub_hi should never be used directly because it represents
  // the scalable part of the SVE vector and cannot be manipulated as a
  // subvector in the same way the lower 128 bits can.
  def zsub_hi : SubRegIndex<128>;
  // Note: Code depends on these having consecutive numbers
  def dsub0 : SubRegIndex<64>;
  def dsub1 : SubRegIndex<64>;
  def dsub2 : SubRegIndex<64>;
  def dsub3 : SubRegIndex<64>;
  // Note: Code depends on these having consecutive numbers
  def qsub0 : SubRegIndex<128>;
  def qsub1 : SubRegIndex<128>;
  def qsub2 : SubRegIndex<128>;
  def qsub3 : SubRegIndex<128>;
  // Note: Code depends on these having consecutive numbers
  def zasubb  : SubRegIndex<2048>; // (16 x 16)/1 bytes  = 2048 bits
  def zasubh0 : SubRegIndex<1024>; // (16 x 16)/2 bytes  = 1024 bits
  def zasubh1 : SubRegIndex<1024>; // (16 x 16)/2 bytes  = 1024 bits
  def zasubs0 : SubRegIndex<512>;  // (16 x 16)/4 bytes  = 512 bits
  def zasubs1 : SubRegIndex<512>;  // (16 x 16)/4 bytes  = 512 bits
  def zasubd0 : SubRegIndex<256>;  // (16 x 16)/8 bytes  = 256 bits
  def zasubd1 : SubRegIndex<256>;  // (16 x 16)/8 bytes  = 256 bits
  def zasubq0 : SubRegIndex<128>;  // (16 x 16)/16 bytes = 128 bits
  def zasubq1 : SubRegIndex<128>;  // (16 x 16)/16 bytes = 128 bits
}

let Namespace = "AArch64" in {
  def vreg : RegAltNameIndex;
  def vlist1 : RegAltNameIndex;
}

//===----------------------------------------------------------------------===//
// Registers
//===----------------------------------------------------------------------===//
def W0    : AArch64Reg<0,   "w0" >, DwarfRegNum<[0]>;
def W1    : AArch64Reg<1,   "w1" >, DwarfRegNum<[1]>;
def W2    : AArch64Reg<2,   "w2" >, DwarfRegNum<[2]>;
def W3    : AArch64Reg<3,   "w3" >, DwarfRegNum<[3]>;
def W4    : AArch64Reg<4,   "w4" >, DwarfRegNum<[4]>;
def W5    : AArch64Reg<5,   "w5" >, DwarfRegNum<[5]>;
def W6    : AArch64Reg<6,   "w6" >, DwarfRegNum<[6]>;
def W7    : AArch64Reg<7,   "w7" >, DwarfRegNum<[7]>;
def W8    : AArch64Reg<8,   "w8" >, DwarfRegNum<[8]>;
def W9    : AArch64Reg<9,   "w9" >, DwarfRegNum<[9]>;
def W10   : AArch64Reg<10, "w10">, DwarfRegNum<[10]>;
def W11   : AArch64Reg<11, "w11">, DwarfRegNum<[11]>;
def W12   : AArch64Reg<12, "w12">, DwarfRegNum<[12]>;
def W13   : AArch64Reg<13, "w13">, DwarfRegNum<[13]>;
def W14   : AArch64Reg<14, "w14">, DwarfRegNum<[14]>;
def W15   : AArch64Reg<15, "w15">, DwarfRegNum<[15]>;
def W16   : AArch64Reg<16, "w16">, DwarfRegNum<[16]>;
def W17   : AArch64Reg<17, "w17">, DwarfRegNum<[17]>;
def W18   : AArch64Reg<18, "w18">, DwarfRegNum<[18]>;
def W19   : AArch64Reg<19, "w19">, DwarfRegNum<[19]>;
def W20   : AArch64Reg<20, "w20">, DwarfRegNum<[20]>;
def W21   : AArch64Reg<21, "w21">, DwarfRegNum<[21]>;
def W22   : AArch64Reg<22, "w22">, DwarfRegNum<[22]>;
def W23   : AArch64Reg<23, "w23">, DwarfRegNum<[23]>;
def W24   : AArch64Reg<24, "w24">, DwarfRegNum<[24]>;
def W25   : AArch64Reg<25, "w25">, DwarfRegNum<[25]>;
def W26   : AArch64Reg<26, "w26">, DwarfRegNum<[26]>;
def W27   : AArch64Reg<27, "w27">, DwarfRegNum<[27]>;
def W28   : AArch64Reg<28, "w28">, DwarfRegNum<[28]>;
def W29   : AArch64Reg<29, "w29">, DwarfRegNum<[29]>;
def W30   : AArch64Reg<30, "w30">, DwarfRegNum<[30]>;
def WSP   : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
def WZR   : AArch64Reg<31, "wzr">, DwarfRegAlias<WSP>;

let SubRegIndices = [sub_32] in {
def X0    : AArch64Reg<0,   "x0",  [W0]>, DwarfRegAlias<W0>;
def X1    : AArch64Reg<1,   "x1",  [W1]>, DwarfRegAlias<W1>;
def X2    : AArch64Reg<2,   "x2",  [W2]>, DwarfRegAlias<W2>;
def X3    : AArch64Reg<3,   "x3",  [W3]>, DwarfRegAlias<W3>;
def X4    : AArch64Reg<4,   "x4",  [W4]>, DwarfRegAlias<W4>;
def X5    : AArch64Reg<5,   "x5",  [W5]>, DwarfRegAlias<W5>;
def X6    : AArch64Reg<6,   "x6",  [W6]>, DwarfRegAlias<W6>;
def X7    : AArch64Reg<7,   "x7",  [W7]>, DwarfRegAlias<W7>;
def X8    : AArch64Reg<8,   "x8",  [W8]>, DwarfRegAlias<W8>;
def X9    : AArch64Reg<9,   "x9",  [W9]>, DwarfRegAlias<W9>;
def X10   : AArch64Reg<10, "x10", [W10]>, DwarfRegAlias<W10>;
def X11   : AArch64Reg<11, "x11", [W11]>, DwarfRegAlias<W11>;
def X12   : AArch64Reg<12, "x12", [W12]>, DwarfRegAlias<W12>;
def X13   : AArch64Reg<13, "x13", [W13]>, DwarfRegAlias<W13>;
def X14   : AArch64Reg<14, "x14", [W14]>, DwarfRegAlias<W14>;
def X15   : AArch64Reg<15, "x15", [W15]>, DwarfRegAlias<W15>;
def X16   : AArch64Reg<16, "x16", [W16]>, DwarfRegAlias<W16>;
def X17   : AArch64Reg<17, "x17", [W17]>, DwarfRegAlias<W17>;
def X18   : AArch64Reg<18, "x18", [W18]>, DwarfRegAlias<W18>;
def X19   : AArch64Reg<19, "x19", [W19]>, DwarfRegAlias<W19>;
def X20   : AArch64Reg<20, "x20", [W20]>, DwarfRegAlias<W20>;
def X21   : AArch64Reg<21, "x21", [W21]>, DwarfRegAlias<W21>;
def X22   : AArch64Reg<22, "x22", [W22]>, DwarfRegAlias<W22>;
def X23   : AArch64Reg<23, "x23", [W23]>, DwarfRegAlias<W23>;
def X24   : AArch64Reg<24, "x24", [W24]>, DwarfRegAlias<W24>;
def X25   : AArch64Reg<25, "x25", [W25]>, DwarfRegAlias<W25>;
def X26   : AArch64Reg<26, "x26", [W26]>, DwarfRegAlias<W26>;
def X27   : AArch64Reg<27, "x27", [W27]>, DwarfRegAlias<W27>;
def X28   : AArch64Reg<28, "x28", [W28]>, DwarfRegAlias<W28>;
def FP    : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias<W29>;
def LR    : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias<W30>;
def SP    : AArch64Reg<31, "sp",  [WSP]>, DwarfRegAlias<WSP>;
def XZR   : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias<WSP>;
}

// Condition code register.
def NZCV  : AArch64Reg<0, "nzcv">;

// First fault status register
def FFR : AArch64Reg<0, "ffr">, DwarfRegNum<[47]>;

// Purely virtual Vector Granule (VG) Dwarf register
def VG : AArch64Reg<0, "vg">, DwarfRegNum<[46]>;

// GPR register classes with the intersections of GPR32/GPR32sp and
// GPR64/GPR64sp for use by the coalescer.
def GPR32common : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 30)> {
  let AltOrders = [(rotl GPR32common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64common : RegisterClass<"AArch64", [i64], 64,
                                (add (sequence "X%u", 0, 28), FP, LR)> {
  let AltOrders = [(rotl GPR64common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
// GPR register classes which exclude SP/WSP.
def GPR32 : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR)> {
  let AltOrders = [(rotl GPR32, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64 : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR)> {
  let AltOrders = [(rotl GPR64, 8)];
  let AltOrderSelect = [{ return 1; }];
}

// GPR register classes which include SP/WSP.
def GPR32sp : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WSP)> {
  let AltOrders = [(rotl GPR32sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64sp : RegisterClass<"AArch64", [i64], 64, (add GPR64common, SP)> {
  let AltOrders = [(rotl GPR64sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}

def GPR32sponly : RegisterClass<"AArch64", [i32], 32, (add WSP)>;
def GPR64sponly : RegisterClass<"AArch64", [i64], 64, (add SP)>;

def GPR64spPlus0Operand : AsmOperandClass {
  let Name = "GPR64sp0";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR64<AArch64::GPR64spRegClassID>";
  let ParserMethod = "tryParseGPR64sp0Operand";
}

def GPR64sp0 : RegisterOperand<GPR64sp> {
  let ParserMatchClass = GPR64spPlus0Operand;
}
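// For illustration only (example assembly, not part of this file): GPR64sp0 is
// used for base registers where the assembler also accepts an optional ", #0",
// e.g. for the exclusive loads:
//   ldxr w0, [x1]          // canonical form
//   ldxr w0, [x1, #0]      // accepted equivalent; the "#0" is parsed away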

// GPR32/GPR64 but with zero-register substitution enabled.
// TODO: Roll this out to GPR32/GPR64/GPR32all/GPR64all.
def GPR32z : RegisterOperand<GPR32> {
  let GIZeroRegister = WZR;
}
def GPR64z : RegisterOperand<GPR64> {
  let GIZeroRegister = XZR;
}

// GPR argument registers.
def GPR32arg : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 7)>;
def GPR64arg : RegisterClass<"AArch64", [i64], 64, (sequence "X%u", 0, 7)>;

// GPR register classes which include WZR/XZR AND SP/WSP. This is not a
// constraint used by any instructions; it is used as a common super-class.
def GPR32all : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR, WSP)>;
def GPR64all : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR, SP)>;

// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// This is for indirect tail calls to store the address of the destination.
def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X21,
                                                     X22, X23, X24, X25, X26,
                                                     X27, X28, FP, LR)>;

// Restricted set of tail call registers, for use when branch target
// enforcement is enabled. These are the only registers which can be used to
// indirectly branch (not call) to the "BTI c" instruction at the start of a
// BTI-protected function.
def rtcGPR64 : RegisterClass<"AArch64", [i64], 64, (add X16, X17)>;

// Register set that excludes registers that are reserved for procedure calls.
// This is used for pseudo-instructions that are actually implemented using a
// procedure call.
def GPR64noip : RegisterClass<"AArch64", [i64], 64, (sub GPR64, X16, X17, LR)>;

// GPR register operands for the post-increment amount of vector load/store
// instructions. These have alternate printing when Rm=31, printing a constant
// immediate value equal to the total number of bytes transferred.

// FIXME: TableGen *should* be able to do these itself now. There appears to be
// a bug in counting how many operands a Post-indexed MCInst should have which
// means the aliases don't trigger.
def GPR64pi1  : RegisterOperand<GPR64, "printPostIncOperand<1>">;
def GPR64pi2  : RegisterOperand<GPR64, "printPostIncOperand<2>">;
def GPR64pi3  : RegisterOperand<GPR64, "printPostIncOperand<3>">;
def GPR64pi4  : RegisterOperand<GPR64, "printPostIncOperand<4>">;
def GPR64pi6  : RegisterOperand<GPR64, "printPostIncOperand<6>">;
def GPR64pi8  : RegisterOperand<GPR64, "printPostIncOperand<8>">;
def GPR64pi12 : RegisterOperand<GPR64, "printPostIncOperand<12>">;
def GPR64pi16 : RegisterOperand<GPR64, "printPostIncOperand<16>">;
def GPR64pi24 : RegisterOperand<GPR64, "printPostIncOperand<24>">;
def GPR64pi32 : RegisterOperand<GPR64, "printPostIncOperand<32>">;
def GPR64pi48 : RegisterOperand<GPR64, "printPostIncOperand<48>">;
def GPR64pi64 : RegisterOperand<GPR64, "printPostIncOperand<64>">;
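// For illustration (example assembly, assuming standard post-indexed NEON
// syntax), the Rm=31 alias these operands provide:
//   ld1 { v0.16b }, [x0], x2      // post-increment by register x2
//   ld1 { v0.16b }, [x0], #16     // Rm=31 form, printed as the byte count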

// Condition code regclass.
def CCR : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
  let CopyCost = -1;  // Don't allow copying of status registers.

  // CCR is not allocatable.
  let isAllocatable = 0;
}

//===----------------------------------------------------------------------===//
// Floating Point Scalar Registers
//===----------------------------------------------------------------------===//

def B0    : AArch64Reg<0,   "b0">, DwarfRegNum<[64]>;
def B1    : AArch64Reg<1,   "b1">, DwarfRegNum<[65]>;
def B2    : AArch64Reg<2,   "b2">, DwarfRegNum<[66]>;
def B3    : AArch64Reg<3,   "b3">, DwarfRegNum<[67]>;
def B4    : AArch64Reg<4,   "b4">, DwarfRegNum<[68]>;
def B5    : AArch64Reg<5,   "b5">, DwarfRegNum<[69]>;
def B6    : AArch64Reg<6,   "b6">, DwarfRegNum<[70]>;
def B7    : AArch64Reg<7,   "b7">, DwarfRegNum<[71]>;
def B8    : AArch64Reg<8,   "b8">, DwarfRegNum<[72]>;
def B9    : AArch64Reg<9,   "b9">, DwarfRegNum<[73]>;
def B10   : AArch64Reg<10, "b10">, DwarfRegNum<[74]>;
def B11   : AArch64Reg<11, "b11">, DwarfRegNum<[75]>;
def B12   : AArch64Reg<12, "b12">, DwarfRegNum<[76]>;
def B13   : AArch64Reg<13, "b13">, DwarfRegNum<[77]>;
def B14   : AArch64Reg<14, "b14">, DwarfRegNum<[78]>;
def B15   : AArch64Reg<15, "b15">, DwarfRegNum<[79]>;
def B16   : AArch64Reg<16, "b16">, DwarfRegNum<[80]>;
def B17   : AArch64Reg<17, "b17">, DwarfRegNum<[81]>;
def B18   : AArch64Reg<18, "b18">, DwarfRegNum<[82]>;
def B19   : AArch64Reg<19, "b19">, DwarfRegNum<[83]>;
def B20   : AArch64Reg<20, "b20">, DwarfRegNum<[84]>;
def B21   : AArch64Reg<21, "b21">, DwarfRegNum<[85]>;
def B22   : AArch64Reg<22, "b22">, DwarfRegNum<[86]>;
def B23   : AArch64Reg<23, "b23">, DwarfRegNum<[87]>;
def B24   : AArch64Reg<24, "b24">, DwarfRegNum<[88]>;
def B25   : AArch64Reg<25, "b25">, DwarfRegNum<[89]>;
def B26   : AArch64Reg<26, "b26">, DwarfRegNum<[90]>;
def B27   : AArch64Reg<27, "b27">, DwarfRegNum<[91]>;
def B28   : AArch64Reg<28, "b28">, DwarfRegNum<[92]>;
def B29   : AArch64Reg<29, "b29">, DwarfRegNum<[93]>;
def B30   : AArch64Reg<30, "b30">, DwarfRegNum<[94]>;
def B31   : AArch64Reg<31, "b31">, DwarfRegNum<[95]>;

let SubRegIndices = [bsub] in {
def H0    : AArch64Reg<0,   "h0", [B0]>, DwarfRegAlias<B0>;
def H1    : AArch64Reg<1,   "h1", [B1]>, DwarfRegAlias<B1>;
def H2    : AArch64Reg<2,   "h2", [B2]>, DwarfRegAlias<B2>;
def H3    : AArch64Reg<3,   "h3", [B3]>, DwarfRegAlias<B3>;
def H4    : AArch64Reg<4,   "h4", [B4]>, DwarfRegAlias<B4>;
def H5    : AArch64Reg<5,   "h5", [B5]>, DwarfRegAlias<B5>;
def H6    : AArch64Reg<6,   "h6", [B6]>, DwarfRegAlias<B6>;
def H7    : AArch64Reg<7,   "h7", [B7]>, DwarfRegAlias<B7>;
def H8    : AArch64Reg<8,   "h8", [B8]>, DwarfRegAlias<B8>;
def H9    : AArch64Reg<9,   "h9", [B9]>, DwarfRegAlias<B9>;
def H10   : AArch64Reg<10, "h10", [B10]>, DwarfRegAlias<B10>;
def H11   : AArch64Reg<11, "h11", [B11]>, DwarfRegAlias<B11>;
def H12   : AArch64Reg<12, "h12", [B12]>, DwarfRegAlias<B12>;
def H13   : AArch64Reg<13, "h13", [B13]>, DwarfRegAlias<B13>;
def H14   : AArch64Reg<14, "h14", [B14]>, DwarfRegAlias<B14>;
def H15   : AArch64Reg<15, "h15", [B15]>, DwarfRegAlias<B15>;
def H16   : AArch64Reg<16, "h16", [B16]>, DwarfRegAlias<B16>;
def H17   : AArch64Reg<17, "h17", [B17]>, DwarfRegAlias<B17>;
def H18   : AArch64Reg<18, "h18", [B18]>, DwarfRegAlias<B18>;
def H19   : AArch64Reg<19, "h19", [B19]>, DwarfRegAlias<B19>;
def H20   : AArch64Reg<20, "h20", [B20]>, DwarfRegAlias<B20>;
def H21   : AArch64Reg<21, "h21", [B21]>, DwarfRegAlias<B21>;
def H22   : AArch64Reg<22, "h22", [B22]>, DwarfRegAlias<B22>;
def H23   : AArch64Reg<23, "h23", [B23]>, DwarfRegAlias<B23>;
def H24   : AArch64Reg<24, "h24", [B24]>, DwarfRegAlias<B24>;
def H25   : AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>;
def H26   : AArch64Reg<26, "h26", [B26]>, DwarfRegAlias<B26>;
def H27   : AArch64Reg<27, "h27", [B27]>, DwarfRegAlias<B27>;
def H28   : AArch64Reg<28, "h28", [B28]>, DwarfRegAlias<B28>;
def H29   : AArch64Reg<29, "h29", [B29]>, DwarfRegAlias<B29>;
def H30   : AArch64Reg<30, "h30", [B30]>, DwarfRegAlias<B30>;
def H31   : AArch64Reg<31, "h31", [B31]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [hsub] in {
def S0    : AArch64Reg<0,   "s0", [H0]>, DwarfRegAlias<B0>;
def S1    : AArch64Reg<1,   "s1", [H1]>, DwarfRegAlias<B1>;
def S2    : AArch64Reg<2,   "s2", [H2]>, DwarfRegAlias<B2>;
def S3    : AArch64Reg<3,   "s3", [H3]>, DwarfRegAlias<B3>;
def S4    : AArch64Reg<4,   "s4", [H4]>, DwarfRegAlias<B4>;
def S5    : AArch64Reg<5,   "s5", [H5]>, DwarfRegAlias<B5>;
def S6    : AArch64Reg<6,   "s6", [H6]>, DwarfRegAlias<B6>;
def S7    : AArch64Reg<7,   "s7", [H7]>, DwarfRegAlias<B7>;
def S8    : AArch64Reg<8,   "s8", [H8]>, DwarfRegAlias<B8>;
def S9    : AArch64Reg<9,   "s9", [H9]>, DwarfRegAlias<B9>;
def S10   : AArch64Reg<10, "s10", [H10]>, DwarfRegAlias<B10>;
def S11   : AArch64Reg<11, "s11", [H11]>, DwarfRegAlias<B11>;
def S12   : AArch64Reg<12, "s12", [H12]>, DwarfRegAlias<B12>;
def S13   : AArch64Reg<13, "s13", [H13]>, DwarfRegAlias<B13>;
def S14   : AArch64Reg<14, "s14", [H14]>, DwarfRegAlias<B14>;
def S15   : AArch64Reg<15, "s15", [H15]>, DwarfRegAlias<B15>;
def S16   : AArch64Reg<16, "s16", [H16]>, DwarfRegAlias<B16>;
def S17   : AArch64Reg<17, "s17", [H17]>, DwarfRegAlias<B17>;
def S18   : AArch64Reg<18, "s18", [H18]>, DwarfRegAlias<B18>;
def S19   : AArch64Reg<19, "s19", [H19]>, DwarfRegAlias<B19>;
def S20   : AArch64Reg<20, "s20", [H20]>, DwarfRegAlias<B20>;
def S21   : AArch64Reg<21, "s21", [H21]>, DwarfRegAlias<B21>;
def S22   : AArch64Reg<22, "s22", [H22]>, DwarfRegAlias<B22>;
def S23   : AArch64Reg<23, "s23", [H23]>, DwarfRegAlias<B23>;
def S24   : AArch64Reg<24, "s24", [H24]>, DwarfRegAlias<B24>;
def S25   : AArch64Reg<25, "s25", [H25]>, DwarfRegAlias<B25>;
def S26   : AArch64Reg<26, "s26", [H26]>, DwarfRegAlias<B26>;
def S27   : AArch64Reg<27, "s27", [H27]>, DwarfRegAlias<B27>;
def S28   : AArch64Reg<28, "s28", [H28]>, DwarfRegAlias<B28>;
def S29   : AArch64Reg<29, "s29", [H29]>, DwarfRegAlias<B29>;
def S30   : AArch64Reg<30, "s30", [H30]>, DwarfRegAlias<B30>;
def S31   : AArch64Reg<31, "s31", [H31]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [ssub], RegAltNameIndices = [vreg, vlist1] in {
def D0    : AArch64Reg<0,   "d0", [S0], ["v0", ""]>, DwarfRegAlias<B0>;
def D1    : AArch64Reg<1,   "d1", [S1], ["v1", ""]>, DwarfRegAlias<B1>;
def D2    : AArch64Reg<2,   "d2", [S2], ["v2", ""]>, DwarfRegAlias<B2>;
def D3    : AArch64Reg<3,   "d3", [S3], ["v3", ""]>, DwarfRegAlias<B3>;
def D4    : AArch64Reg<4,   "d4", [S4], ["v4", ""]>, DwarfRegAlias<B4>;
def D5    : AArch64Reg<5,   "d5", [S5], ["v5", ""]>, DwarfRegAlias<B5>;
def D6    : AArch64Reg<6,   "d6", [S6], ["v6", ""]>, DwarfRegAlias<B6>;
def D7    : AArch64Reg<7,   "d7", [S7], ["v7", ""]>, DwarfRegAlias<B7>;
def D8    : AArch64Reg<8,   "d8", [S8], ["v8", ""]>, DwarfRegAlias<B8>;
def D9    : AArch64Reg<9,   "d9", [S9], ["v9", ""]>, DwarfRegAlias<B9>;
def D10   : AArch64Reg<10, "d10", [S10], ["v10", ""]>, DwarfRegAlias<B10>;
def D11   : AArch64Reg<11, "d11", [S11], ["v11", ""]>, DwarfRegAlias<B11>;
def D12   : AArch64Reg<12, "d12", [S12], ["v12", ""]>, DwarfRegAlias<B12>;
def D13   : AArch64Reg<13, "d13", [S13], ["v13", ""]>, DwarfRegAlias<B13>;
def D14   : AArch64Reg<14, "d14", [S14], ["v14", ""]>, DwarfRegAlias<B14>;
def D15   : AArch64Reg<15, "d15", [S15], ["v15", ""]>, DwarfRegAlias<B15>;
def D16   : AArch64Reg<16, "d16", [S16], ["v16", ""]>, DwarfRegAlias<B16>;
def D17   : AArch64Reg<17, "d17", [S17], ["v17", ""]>, DwarfRegAlias<B17>;
def D18   : AArch64Reg<18, "d18", [S18], ["v18", ""]>, DwarfRegAlias<B18>;
def D19   : AArch64Reg<19, "d19", [S19], ["v19", ""]>, DwarfRegAlias<B19>;
def D20   : AArch64Reg<20, "d20", [S20], ["v20", ""]>, DwarfRegAlias<B20>;
def D21   : AArch64Reg<21, "d21", [S21], ["v21", ""]>, DwarfRegAlias<B21>;
def D22   : AArch64Reg<22, "d22", [S22], ["v22", ""]>, DwarfRegAlias<B22>;
def D23   : AArch64Reg<23, "d23", [S23], ["v23", ""]>, DwarfRegAlias<B23>;
def D24   : AArch64Reg<24, "d24", [S24], ["v24", ""]>, DwarfRegAlias<B24>;
def D25   : AArch64Reg<25, "d25", [S25], ["v25", ""]>, DwarfRegAlias<B25>;
def D26   : AArch64Reg<26, "d26", [S26], ["v26", ""]>, DwarfRegAlias<B26>;
def D27   : AArch64Reg<27, "d27", [S27], ["v27", ""]>, DwarfRegAlias<B27>;
def D28   : AArch64Reg<28, "d28", [S28], ["v28", ""]>, DwarfRegAlias<B28>;
def D29   : AArch64Reg<29, "d29", [S29], ["v29", ""]>, DwarfRegAlias<B29>;
def D30   : AArch64Reg<30, "d30", [S30], ["v30", ""]>, DwarfRegAlias<B30>;
def D31   : AArch64Reg<31, "d31", [S31], ["v31", ""]>, DwarfRegAlias<B31>;
}

let SubRegIndices = [dsub], RegAltNameIndices = [vreg, vlist1] in {
def Q0    : AArch64Reg<0,   "q0", [D0], ["v0", ""]>, DwarfRegAlias<B0>;
def Q1    : AArch64Reg<1,   "q1", [D1], ["v1", ""]>, DwarfRegAlias<B1>;
def Q2    : AArch64Reg<2,   "q2", [D2], ["v2", ""]>, DwarfRegAlias<B2>;
def Q3    : AArch64Reg<3,   "q3", [D3], ["v3", ""]>, DwarfRegAlias<B3>;
def Q4    : AArch64Reg<4,   "q4", [D4], ["v4", ""]>, DwarfRegAlias<B4>;
def Q5    : AArch64Reg<5,   "q5", [D5], ["v5", ""]>, DwarfRegAlias<B5>;
def Q6    : AArch64Reg<6,   "q6", [D6], ["v6", ""]>, DwarfRegAlias<B6>;
def Q7    : AArch64Reg<7,   "q7", [D7], ["v7", ""]>, DwarfRegAlias<B7>;
def Q8    : AArch64Reg<8,   "q8", [D8], ["v8", ""]>, DwarfRegAlias<B8>;
def Q9    : AArch64Reg<9,   "q9", [D9], ["v9", ""]>, DwarfRegAlias<B9>;
def Q10   : AArch64Reg<10, "q10", [D10], ["v10", ""]>, DwarfRegAlias<B10>;
def Q11   : AArch64Reg<11, "q11", [D11], ["v11", ""]>, DwarfRegAlias<B11>;
def Q12   : AArch64Reg<12, "q12", [D12], ["v12", ""]>, DwarfRegAlias<B12>;
def Q13   : AArch64Reg<13, "q13", [D13], ["v13", ""]>, DwarfRegAlias<B13>;
def Q14   : AArch64Reg<14, "q14", [D14], ["v14", ""]>, DwarfRegAlias<B14>;
def Q15   : AArch64Reg<15, "q15", [D15], ["v15", ""]>, DwarfRegAlias<B15>;
def Q16   : AArch64Reg<16, "q16", [D16], ["v16", ""]>, DwarfRegAlias<B16>;
def Q17   : AArch64Reg<17, "q17", [D17], ["v17", ""]>, DwarfRegAlias<B17>;
def Q18   : AArch64Reg<18, "q18", [D18], ["v18", ""]>, DwarfRegAlias<B18>;
def Q19   : AArch64Reg<19, "q19", [D19], ["v19", ""]>, DwarfRegAlias<B19>;
def Q20   : AArch64Reg<20, "q20", [D20], ["v20", ""]>, DwarfRegAlias<B20>;
def Q21   : AArch64Reg<21, "q21", [D21], ["v21", ""]>, DwarfRegAlias<B21>;
def Q22   : AArch64Reg<22, "q22", [D22], ["v22", ""]>, DwarfRegAlias<B22>;
def Q23   : AArch64Reg<23, "q23", [D23], ["v23", ""]>, DwarfRegAlias<B23>;
def Q24   : AArch64Reg<24, "q24", [D24], ["v24", ""]>, DwarfRegAlias<B24>;
def Q25   : AArch64Reg<25, "q25", [D25], ["v25", ""]>, DwarfRegAlias<B25>;
def Q26   : AArch64Reg<26, "q26", [D26], ["v26", ""]>, DwarfRegAlias<B26>;
def Q27   : AArch64Reg<27, "q27", [D27], ["v27", ""]>, DwarfRegAlias<B27>;
def Q28   : AArch64Reg<28, "q28", [D28], ["v28", ""]>, DwarfRegAlias<B28>;
def Q29   : AArch64Reg<29, "q29", [D29], ["v29", ""]>, DwarfRegAlias<B29>;
def Q30   : AArch64Reg<30, "q30", [D30], ["v30", ""]>, DwarfRegAlias<B30>;
def Q31   : AArch64Reg<31, "q31", [D31], ["v31", ""]>, DwarfRegAlias<B31>;
}

def FPR8  : RegisterClass<"AArch64", [untyped], 8, (sequence "B%u", 0, 31)> {
  let Size = 8;
}
def FPR16 : RegisterClass<"AArch64", [f16, bf16], 16, (sequence "H%u", 0, 31)> {
  let Size = 16;
}

def FPR16_lo : RegisterClass<"AArch64", [f16], 16, (trunc FPR16, 16)> {
  let Size = 16;
}
def FPR32 : RegisterClass<"AArch64", [f32, i32], 32,(sequence "S%u", 0, 31)>;
def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
                                      v1i64, v4f16, v4bf16],
                                     64, (sequence "D%u", 0, 31)>;
def FPR64_lo : RegisterClass<"AArch64",
                             [v8i8, v4i16, v2i32, v1i64, v4f16, v4bf16, v2f32,
                              v1f64],
                             64, (trunc FPR64, 16)>;

// We don't (yet) have an f128 legal type, so don't use that here. We
// normalize 128-bit vectors to v2f64 for arg passing and such, so use
// that here.
def FPR128 : RegisterClass<"AArch64",
                           [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128,
                            v8f16, v8bf16],
                           128, (sequence "Q%u", 0, 31)>;

// The lower 16 vector registers.  Some instructions can only take registers
// in this range.
def FPR128_lo : RegisterClass<"AArch64",
                              [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16,
                               v8bf16],
                              128, (trunc FPR128, 16)>;
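// For illustration (example assembly, not part of this file): the fp16
// by-element forms only have a 4-bit register field for the indexed operand,
// which is why they use the _lo classes, e.g.
//   fmla v0.8h, v1.8h, v2.h[3]    // v2 must come from v0-v15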

// Pairs, triples, and quads of 64-bit vector registers.
def DSeqPairs : RegisterTuples<[dsub0, dsub1], [(rotl FPR64, 0), (rotl FPR64, 1)]>;
def DSeqTriples : RegisterTuples<[dsub0, dsub1, dsub2],
                                 [(rotl FPR64, 0), (rotl FPR64, 1),
                                  (rotl FPR64, 2)]>;
def DSeqQuads : RegisterTuples<[dsub0, dsub1, dsub2, dsub3],
                               [(rotl FPR64, 0), (rotl FPR64, 1),
                                (rotl FPR64, 2), (rotl FPR64, 3)]>;
def DD   : RegisterClass<"AArch64", [untyped], 64, (add DSeqPairs)> {
  let Size = 128;
}
def DDD  : RegisterClass<"AArch64", [untyped], 64, (add DSeqTriples)> {
  let Size = 192;
}
def DDDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqQuads)> {
  let Size = 256;
}

// Pairs, triples, and quads of 128-bit vector registers.
def QSeqPairs : RegisterTuples<[qsub0, qsub1], [(rotl FPR128, 0), (rotl FPR128, 1)]>;
def QSeqTriples : RegisterTuples<[qsub0, qsub1, qsub2],
                                 [(rotl FPR128, 0), (rotl FPR128, 1),
                                  (rotl FPR128, 2)]>;
def QSeqQuads : RegisterTuples<[qsub0, qsub1, qsub2, qsub3],
                               [(rotl FPR128, 0), (rotl FPR128, 1),
                                (rotl FPR128, 2), (rotl FPR128, 3)]>;
def QQ   : RegisterClass<"AArch64", [untyped], 128, (add QSeqPairs)> {
  let Size = 256;
}
def QQQ  : RegisterClass<"AArch64", [untyped], 128, (add QSeqTriples)> {
  let Size = 384;
}
def QQQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqQuads)> {
  let Size = 512;
}


// Vector operand versions of the FP registers. Alternate name printing and
// assembler matching.
def VectorReg64AsmOperand : AsmOperandClass {
  let Name = "VectorReg64";
  let PredicateMethod = "isNeonVectorReg";
}
def VectorReg128AsmOperand : AsmOperandClass {
  let Name = "VectorReg128";
  let PredicateMethod = "isNeonVectorReg";
}

def V64  : RegisterOperand<FPR64, "printVRegOperand"> {
  let ParserMatchClass = VectorReg64AsmOperand;
}

def V128 : RegisterOperand<FPR128, "printVRegOperand"> {
  let ParserMatchClass = VectorReg128AsmOperand;
}

def VectorRegLoAsmOperand : AsmOperandClass {
  let Name = "VectorRegLo";
  let PredicateMethod = "isNeonVectorRegLo";
}
def V64_lo : RegisterOperand<FPR64_lo, "printVRegOperand"> {
  let ParserMatchClass = VectorRegLoAsmOperand;
}
def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
  let ParserMatchClass = VectorRegLoAsmOperand;
}

class TypedVecListAsmOperand<int count, string vecty, int lanes, int eltsize>
    : AsmOperandClass {
  let Name = "TypedVectorList" # count # "_" # lanes # eltsize;

  let PredicateMethod
      = "isTypedVectorList<RegKind::NeonVector, " # count # ", " # lanes # ", " # eltsize # ">";
  let RenderMethod = "addVectorListOperands<" # vecty  # ", "  # count # ">";
}

class TypedVecListRegOperand<RegisterClass Reg, int lanes, string eltsize>
    : RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
                                                   # eltsize # "'>">;

multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
  // With implicit types (probably on instruction instead). E.g. { v0, v1 }
  def _64AsmOperand : AsmOperandClass {
    let Name = NAME # "64";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_DReg, " # count # ">";
  }

  def "64" : RegisterOperand<Reg64, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_64AsmOperand");
  }

  def _128AsmOperand : AsmOperandClass {
    let Name = NAME # "128";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_QReg, " # count # ">";
  }

  def "128" : RegisterOperand<Reg128, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_128AsmOperand");
  }

  // 64-bit register lists with explicit type.

  // { v0.8b, v1.8b }
  def _8bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 8, 8>;
  def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
  }

  // { v0.4h, v1.4h }
  def _4hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 4, 16>;
  def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
  }

  // { v0.2s, v1.2s }
  def _2sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 2, 32>;
  def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
  }

  // { v0.1d, v1.1d }
  def _1dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 1, 64>;
  def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
  }

  // 128-bit register lists with explicit type

  // { v0.16b, v1.16b }
  def _16bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 16, 8>;
  def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
  }

  // { v0.8h, v1.8h }
  def _8hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 8, 16>;
  def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
  }

  // { v0.4s, v1.4s }
  def _4sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 4, 32>;
  def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
  }

  // { v0.2d, v1.2d }
  def _2dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 2, 64>;
  def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
  }

  // { v0.b, v1.b }
  def _bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 8>;
  def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
  }

  // { v0.h, v1.h }
  def _hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 16>;
  def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
  }

  // { v0.s, v1.s }
  def _sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 32>;
  def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
  }

  // { v0.d, v1.d }
  def _dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 64>;
  def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
  }


}

defm VecListOne   : VectorList<1, FPR64, FPR128>;
defm VecListTwo   : VectorList<2, DD,    QQ>;
defm VecListThree : VectorList<3, DDD,   QQQ>;
defm VecListFour  : VectorList<4, DDDD,  QQQQ>;
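// For illustration, the kinds of lists these operands match (example assembly,
// assuming standard NEON syntax):
//   ld1 { v0.8b, v1.8b }, [x0]        // two-register 64-bit list ("8b" flavour)
//   ld2 { v4.s, v5.s }[1], [x1]       // lane forms use the index-less "s" lists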

class FPRAsmOperand<string RC> : AsmOperandClass {
  let Name = "FPRAsmOperand" # RC;
  let PredicateMethod = "isGPR64<AArch64::" # RC # "RegClassID>";
  let RenderMethod = "addRegOperands";
}

// Register operand versions of the scalar FP registers.
def FPR8Op  : RegisterOperand<FPR8, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR8">;
}

def FPR16Op  : RegisterOperand<FPR16, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR16">;
}

def FPR16Op_lo  : RegisterOperand<FPR16_lo, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR16_lo">;
}

def FPR32Op  : RegisterOperand<FPR32, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR32">;
}

def FPR64Op  : RegisterOperand<FPR64, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR64">;
}

def FPR128Op : RegisterOperand<FPR128, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR128">;
}

//===----------------------------------------------------------------------===//
// ARMv8.1a atomic CASP register operands


def WSeqPairs : RegisterTuples<[sube32, subo32],
                               [(decimate (rotl GPR32, 0), 2),
                                (decimate (rotl GPR32, 1), 2)]>;
def XSeqPairs : RegisterTuples<[sube64, subo64],
                               [(decimate (rotl GPR64, 0), 2),
                                (decimate (rotl GPR64, 1), 2)]>;

def WSeqPairsClass   : RegisterClass<"AArch64", [untyped], 32,
                                     (add WSeqPairs)>{
  let Size = 64;
}
def XSeqPairsClass   : RegisterClass<"AArch64", [untyped], 64,
                                     (add XSeqPairs)>{
  let Size = 128;
}


let RenderMethod = "addRegOperands", ParserMethod="tryParseGPRSeqPair" in {
  def WSeqPairsAsmOperandClass : AsmOperandClass { let Name = "WSeqPair"; }
  def XSeqPairsAsmOperandClass : AsmOperandClass { let Name = "XSeqPair"; }
}

def WSeqPairClassOperand :
    RegisterOperand<WSeqPairsClass, "printGPRSeqPairsClassOperand<32>"> {
  let ParserMatchClass = WSeqPairsAsmOperandClass;
}
def XSeqPairClassOperand :
    RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
  let ParserMatchClass = XSeqPairsAsmOperandClass;
}
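// For illustration (example assembly): CASP takes even/odd sequential register
// pairs, which is what the decimated tuples above provide:
//   casp   x0, x1, x2, x3, [x4]
//   caspal w6, w7, w8, w9, [x2]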


//===----- END: v8.1a atomic CASP register operands -----------------------===//

//===----------------------------------------------------------------------===//
// Armv8.7a accelerator extension register operands: 8 consecutive GPRs
// starting with an even one

let Namespace = "AArch64" in {
  foreach i = 0-7 in
    def "x8sub_"#i : SubRegIndex<64, !mul(64, i)>;
}

def Tuples8X : RegisterTuples<
  !foreach(i, [0,1,2,3,4,5,6,7], !cast<SubRegIndex>("x8sub_"#i)),
  !foreach(i, [0,1,2,3,4,5,6,7], (trunc (decimate (rotl GPR64, i), 2), 12))>;

def GPR64x8Class : RegisterClass<"AArch64", [i64x8], 512, (trunc Tuples8X, 12)> {
  let Size = 512;
}
def GPR64x8AsmOp : AsmOperandClass {
  let Name = "GPR64x8";
  let ParserMethod = "tryParseGPR64x8";
  let RenderMethod = "addRegOperands";
}
def GPR64x8 : RegisterOperand<GPR64x8Class, "printGPR64x8"> {
  let ParserMatchClass = GPR64x8AsmOp;
  let PrintMethod = "printGPR64x8";
}
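// For illustration (example assembly, not part of this file): the v8.7a
// LD64B/ST64B instructions transfer eight consecutive GPRs starting at an
// even register, e.g.
//   ld64b x0, [x13]     // loads x0..x7 from [x13]
//   st64b x2, [x20]     // stores x2..x9 to [x20]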

//===----- END: v8.7a accelerator extension register operands -------------===//

// SVE predicate registers
def P0    : AArch64Reg<0,   "p0">, DwarfRegNum<[48]>;
def P1    : AArch64Reg<1,   "p1">, DwarfRegNum<[49]>;
def P2    : AArch64Reg<2,   "p2">, DwarfRegNum<[50]>;
def P3    : AArch64Reg<3,   "p3">, DwarfRegNum<[51]>;
def P4    : AArch64Reg<4,   "p4">, DwarfRegNum<[52]>;
def P5    : AArch64Reg<5,   "p5">, DwarfRegNum<[53]>;
def P6    : AArch64Reg<6,   "p6">, DwarfRegNum<[54]>;
def P7    : AArch64Reg<7,   "p7">, DwarfRegNum<[55]>;
def P8    : AArch64Reg<8,   "p8">, DwarfRegNum<[56]>;
def P9    : AArch64Reg<9,   "p9">, DwarfRegNum<[57]>;
def P10   : AArch64Reg<10, "p10">, DwarfRegNum<[58]>;
def P11   : AArch64Reg<11, "p11">, DwarfRegNum<[59]>;
def P12   : AArch64Reg<12, "p12">, DwarfRegNum<[60]>;
def P13   : AArch64Reg<13, "p13">, DwarfRegNum<[61]>;
def P14   : AArch64Reg<14, "p14">, DwarfRegNum<[62]>;
def P15   : AArch64Reg<15, "p15">, DwarfRegNum<[63]>;

// The parts of the SVE registers that don't overlap the Neon registers.
// These are only used as part of clobber lists.
def Z0_HI    : AArch64Reg<0,   "z0_hi">;
def Z1_HI    : AArch64Reg<1,   "z1_hi">;
def Z2_HI    : AArch64Reg<2,   "z2_hi">;
def Z3_HI    : AArch64Reg<3,   "z3_hi">;
def Z4_HI    : AArch64Reg<4,   "z4_hi">;
def Z5_HI    : AArch64Reg<5,   "z5_hi">;
def Z6_HI    : AArch64Reg<6,   "z6_hi">;
def Z7_HI    : AArch64Reg<7,   "z7_hi">;
def Z8_HI    : AArch64Reg<8,   "z8_hi">;
def Z9_HI    : AArch64Reg<9,   "z9_hi">;
def Z10_HI   : AArch64Reg<10, "z10_hi">;
def Z11_HI   : AArch64Reg<11, "z11_hi">;
def Z12_HI   : AArch64Reg<12, "z12_hi">;
def Z13_HI   : AArch64Reg<13, "z13_hi">;
def Z14_HI   : AArch64Reg<14, "z14_hi">;
def Z15_HI   : AArch64Reg<15, "z15_hi">;
def Z16_HI   : AArch64Reg<16, "z16_hi">;
def Z17_HI   : AArch64Reg<17, "z17_hi">;
def Z18_HI   : AArch64Reg<18, "z18_hi">;
def Z19_HI   : AArch64Reg<19, "z19_hi">;
def Z20_HI   : AArch64Reg<20, "z20_hi">;
def Z21_HI   : AArch64Reg<21, "z21_hi">;
def Z22_HI   : AArch64Reg<22, "z22_hi">;
def Z23_HI   : AArch64Reg<23, "z23_hi">;
def Z24_HI   : AArch64Reg<24, "z24_hi">;
def Z25_HI   : AArch64Reg<25, "z25_hi">;
def Z26_HI   : AArch64Reg<26, "z26_hi">;
def Z27_HI   : AArch64Reg<27, "z27_hi">;
def Z28_HI   : AArch64Reg<28, "z28_hi">;
def Z29_HI   : AArch64Reg<29, "z29_hi">;
def Z30_HI   : AArch64Reg<30, "z30_hi">;
def Z31_HI   : AArch64Reg<31, "z31_hi">;

// SVE variable-size vector registers
let SubRegIndices = [zsub,zsub_hi] in {
def Z0    : AArch64Reg<0,   "z0",  [Q0,  Z0_HI]>, DwarfRegNum<[96]>;
def Z1    : AArch64Reg<1,   "z1",  [Q1,  Z1_HI]>, DwarfRegNum<[97]>;
def Z2    : AArch64Reg<2,   "z2",  [Q2,  Z2_HI]>, DwarfRegNum<[98]>;
def Z3    : AArch64Reg<3,   "z3",  [Q3,  Z3_HI]>, DwarfRegNum<[99]>;
def Z4    : AArch64Reg<4,   "z4",  [Q4,  Z4_HI]>, DwarfRegNum<[100]>;
def Z5    : AArch64Reg<5,   "z5",  [Q5,  Z5_HI]>, DwarfRegNum<[101]>;
def Z6    : AArch64Reg<6,   "z6",  [Q6,  Z6_HI]>, DwarfRegNum<[102]>;
def Z7    : AArch64Reg<7,   "z7",  [Q7,  Z7_HI]>, DwarfRegNum<[103]>;
def Z8    : AArch64Reg<8,   "z8",  [Q8,  Z8_HI]>, DwarfRegNum<[104]>;
def Z9    : AArch64Reg<9,   "z9",  [Q9,  Z9_HI]>, DwarfRegNum<[105]>;
def Z10   : AArch64Reg<10, "z10", [Q10, Z10_HI]>, DwarfRegNum<[106]>;
def Z11   : AArch64Reg<11, "z11", [Q11, Z11_HI]>, DwarfRegNum<[107]>;
def Z12   : AArch64Reg<12, "z12", [Q12, Z12_HI]>, DwarfRegNum<[108]>;
def Z13   : AArch64Reg<13, "z13", [Q13, Z13_HI]>, DwarfRegNum<[109]>;
def Z14   : AArch64Reg<14, "z14", [Q14, Z14_HI]>, DwarfRegNum<[110]>;
def Z15   : AArch64Reg<15, "z15", [Q15, Z15_HI]>, DwarfRegNum<[111]>;
def Z16   : AArch64Reg<16, "z16", [Q16, Z16_HI]>, DwarfRegNum<[112]>;
def Z17   : AArch64Reg<17, "z17", [Q17, Z17_HI]>, DwarfRegNum<[113]>;
def Z18   : AArch64Reg<18, "z18", [Q18, Z18_HI]>, DwarfRegNum<[114]>;
def Z19   : AArch64Reg<19, "z19", [Q19, Z19_HI]>, DwarfRegNum<[115]>;
def Z20   : AArch64Reg<20, "z20", [Q20, Z20_HI]>, DwarfRegNum<[116]>;
def Z21   : AArch64Reg<21, "z21", [Q21, Z21_HI]>, DwarfRegNum<[117]>;
def Z22   : AArch64Reg<22, "z22", [Q22, Z22_HI]>, DwarfRegNum<[118]>;
def Z23   : AArch64Reg<23, "z23", [Q23, Z23_HI]>, DwarfRegNum<[119]>;
def Z24   : AArch64Reg<24, "z24", [Q24, Z24_HI]>, DwarfRegNum<[120]>;
def Z25   : AArch64Reg<25, "z25", [Q25, Z25_HI]>, DwarfRegNum<[121]>;
def Z26   : AArch64Reg<26, "z26", [Q26, Z26_HI]>, DwarfRegNum<[122]>;
def Z27   : AArch64Reg<27, "z27", [Q27, Z27_HI]>, DwarfRegNum<[123]>;
def Z28   : AArch64Reg<28, "z28", [Q28, Z28_HI]>, DwarfRegNum<[124]>;
def Z29   : AArch64Reg<29, "z29", [Q29, Z29_HI]>, DwarfRegNum<[125]>;
def Z30   : AArch64Reg<30, "z30", [Q30, Z30_HI]>, DwarfRegNum<[126]>;
def Z31   : AArch64Reg<31, "z31", [Q31, Z31_HI]>, DwarfRegNum<[127]>;
}

// Enum describing the element size for destructive
// operations.
class ElementSizeEnum<bits<3> val> {
  bits<3> Value = val;
}

def ElementSizeNone : ElementSizeEnum<0>;
def ElementSizeB    : ElementSizeEnum<1>;
def ElementSizeH    : ElementSizeEnum<2>;
def ElementSizeS    : ElementSizeEnum<3>;
def ElementSizeD    : ElementSizeEnum<4>;
def ElementSizeQ    : ElementSizeEnum<5>;  // Unused

class SVERegOp <string Suffix, AsmOperandClass C,
                ElementSizeEnum Size,
                RegisterClass RC> : RegisterOperand<RC> {
  ElementSizeEnum ElementSize;

  let ElementSize = Size;
  let PrintMethod = !if(!eq(Suffix, ""),
                        "printSVERegOp<>",
                        "printSVERegOp<'" # Suffix # "'>");
  let ParserMatchClass = C;
}

class PPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}
class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size,
                RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {}

//******************************************************************************

// SVE predicate register classes.
class PPRClass<int lastreg> : RegisterClass<
                                  "AArch64",
                                  [ nxv16i1, nxv8i1, nxv4i1, nxv2i1 ], 16,
                                  (sequence "P%u", 0, lastreg)> {
  let Size = 16;
}

def PPR    : PPRClass<15>;
def PPR_3b : PPRClass<7>; // Restricted 3 bit SVE predicate register class.

class PPRAsmOperand <string name, string RegClass, int Width>: AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEPredicateVectorRegOfWidth<"
                            # Width # ", " # "AArch64::" # RegClass # "RegClassID>";
  let DiagnosticType = "InvalidSVE" # name # "Reg";
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEPredicateVector";
}

def PPRAsmOpAny : PPRAsmOperand<"PredicateAny", "PPR",  0>;
def PPRAsmOp8   : PPRAsmOperand<"PredicateB",   "PPR",  8>;
def PPRAsmOp16  : PPRAsmOperand<"PredicateH",   "PPR", 16>;
def PPRAsmOp32  : PPRAsmOperand<"PredicateS",   "PPR", 32>;
def PPRAsmOp64  : PPRAsmOperand<"PredicateD",   "PPR", 64>;

def PPRAny : PPRRegOp<"",  PPRAsmOpAny, ElementSizeNone, PPR>;
def PPR8   : PPRRegOp<"b", PPRAsmOp8,   ElementSizeB,  PPR>;
def PPR16  : PPRRegOp<"h", PPRAsmOp16,  ElementSizeH,  PPR>;
def PPR32  : PPRRegOp<"s", PPRAsmOp32,  ElementSizeS,  PPR>;
def PPR64  : PPRRegOp<"d", PPRAsmOp64,  ElementSizeD,  PPR>;

def PPRAsmOp3bAny : PPRAsmOperand<"Predicate3bAny", "PPR_3b",  0>;

def PPR3bAny : PPRRegOp<"",  PPRAsmOp3bAny, ElementSizeNone, PPR_3b>;
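// For illustration (example assembly): most predicated SVE instructions encode
// the governing predicate in only 3 bits, which is what PPR_3b/PPR3bAny model:
//   add   z0.s, p0/m, z0.s, z1.s    // governing predicate limited to p0-p7
//   cmpeq p8.s, p1/z, z2.s, z3.s    // result predicate may still be p8-p15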

//******************************************************************************

// SVE vector register classes
class ZPRClass<int lastreg> : RegisterClass<"AArch64",
                                            [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
                                             nxv2f16, nxv4f16, nxv8f16,
                                             nxv2bf16, nxv4bf16, nxv8bf16,
                                             nxv2f32, nxv4f32,
                                             nxv2f64],
                                            128, (sequence "Z%u", 0, lastreg)> {
  let Size = 128;
}

def ZPR    : ZPRClass<31>;
def ZPR_4b : ZPRClass<15>; // Restricted 4 bit SVE vector register class.
def ZPR_3b : ZPRClass<7>;  // Restricted 3 bit SVE vector register class.

class ZPRAsmOperand<string name, int Width, string RegClassSuffix = "">
    : AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEDataVectorRegOfWidth<"
                            # Width # ", AArch64::ZPR"
                            # RegClassSuffix # "RegClassID>";
  let RenderMethod = "addRegOperands";
  let DiagnosticType = "InvalidZPR" # RegClassSuffix # Width;
  let ParserMethod = "tryParseSVEDataVector<false, "
                               # !if(!eq(Width, 0), "false", "true") # ">";
}

def ZPRAsmOpAny : ZPRAsmOperand<"VectorAny", 0>;
def ZPRAsmOp8   : ZPRAsmOperand<"VectorB",   8>;
def ZPRAsmOp16  : ZPRAsmOperand<"VectorH",   16>;
def ZPRAsmOp32  : ZPRAsmOperand<"VectorS",   32>;
def ZPRAsmOp64  : ZPRAsmOperand<"VectorD",   64>;
def ZPRAsmOp128 : ZPRAsmOperand<"VectorQ",   128>;

def ZPRAny  : ZPRRegOp<"",  ZPRAsmOpAny, ElementSizeNone, ZPR>;
def ZPR8    : ZPRRegOp<"b", ZPRAsmOp8,   ElementSizeB, ZPR>;
def ZPR16   : ZPRRegOp<"h", ZPRAsmOp16,  ElementSizeH, ZPR>;
def ZPR32   : ZPRRegOp<"s", ZPRAsmOp32,  ElementSizeS, ZPR>;
def ZPR64   : ZPRRegOp<"d", ZPRAsmOp64,  ElementSizeD, ZPR>;
def ZPR128  : ZPRRegOp<"q", ZPRAsmOp128, ElementSizeQ, ZPR>;

def ZPRAsmOp3b8   : ZPRAsmOperand<"Vector3bB", 8, "_3b">;
def ZPRAsmOp3b16  : ZPRAsmOperand<"Vector3bH", 16, "_3b">;
def ZPRAsmOp3b32  : ZPRAsmOperand<"Vector3bS", 32, "_3b">;

def ZPR3b8  : ZPRRegOp<"b", ZPRAsmOp3b8,  ElementSizeB, ZPR_3b>;
def ZPR3b16 : ZPRRegOp<"h", ZPRAsmOp3b16, ElementSizeH, ZPR_3b>;
def ZPR3b32 : ZPRRegOp<"s", ZPRAsmOp3b32, ElementSizeS, ZPR_3b>;

def ZPRAsmOp4b16  : ZPRAsmOperand<"Vector4bH", 16, "_4b">;
def ZPRAsmOp4b32  : ZPRAsmOperand<"Vector4bS", 32, "_4b">;
def ZPRAsmOp4b64  : ZPRAsmOperand<"Vector4bD", 64, "_4b">;

def ZPR4b16 : ZPRRegOp<"h", ZPRAsmOp4b16, ElementSizeH, ZPR_4b>;
def ZPR4b32 : ZPRRegOp<"s", ZPRAsmOp4b32, ElementSizeS, ZPR_4b>;
def ZPR4b64 : ZPRRegOp<"d", ZPRAsmOp4b64, ElementSizeD, ZPR_4b>;
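// For illustration (example assembly): the by-element (indexed) SVE multiplies
// encode Zm in fewer bits, which the restricted classes above model, e.g.
//   fmla z0.h, z1.h, z7.h[5]     // 16-bit elements: Zm limited to z0-z7
//   fmla z0.d, z1.d, z15.d[1]    // 64-bit elements: Zm limited to z0-z15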

class FPRasZPR<int Width> : AsmOperandClass{
  let Name = "FPR" # Width # "asZPR";
  let PredicateMethod = "isFPRasZPR<AArch64::FPR" # Width # "RegClassID>";
  let RenderMethod = "addFPRasZPRRegOperands<" # Width # ">";
}

class FPRasZPROperand<int Width> : RegisterOperand<ZPR> {
  let ParserMatchClass = FPRasZPR<Width>;
  let PrintMethod = "printZPRasFPR<" # Width # ">";
}

def FPR8asZPR   : FPRasZPROperand<8>;
def FPR16asZPR  : FPRasZPROperand<16>;
def FPR32asZPR  : FPRasZPROperand<32>;
def FPR64asZPR  : FPRasZPROperand<64>;
def FPR128asZPR : FPRasZPROperand<128>;

let Namespace = "AArch64" in {
  def zsub0 : SubRegIndex<128, -1>;
  def zsub1 : SubRegIndex<128, -1>;
  def zsub2 : SubRegIndex<128, -1>;
  def zsub3 : SubRegIndex<128, -1>;
}

// Pairs, triples, and quads of SVE vector registers.
def ZSeqPairs   : RegisterTuples<[zsub0, zsub1], [(rotl ZPR, 0), (rotl ZPR, 1)]>;
def ZSeqTriples : RegisterTuples<[zsub0, zsub1, zsub2], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2)]>;
def ZSeqQuads   : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2), (rotl ZPR, 3)]>;

def ZPR2   : RegisterClass<"AArch64", [untyped], 128, (add ZSeqPairs)>  {
  let Size = 256;
}
def ZPR3  : RegisterClass<"AArch64", [untyped], 128, (add ZSeqTriples)> {
  let Size = 384;
}
def ZPR4 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqQuads)> {
  let Size = 512;
}

class ZPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
  let Name = "SVEVectorList" # NumRegs # ElementWidth;
  let ParserMethod = "tryParseVectorList<RegKind::SVEDataVector>";
  let PredicateMethod =
      "isTypedVectorList<RegKind::SVEDataVector, " #NumRegs #", 0, " #ElementWidth #">";
  let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_ZReg, " # NumRegs # ">";
}

def Z_b  : RegisterOperand<ZPR,  "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 1>;
}

def Z_h  : RegisterOperand<ZPR,  "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 1>;
}

def Z_s  : RegisterOperand<ZPR,  "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 1>;
}

def Z_d  : RegisterOperand<ZPR,  "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 1>;
}

def ZZ_b  : RegisterOperand<ZPR2, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 2>;
}

def ZZ_h  : RegisterOperand<ZPR2, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 2>;
}

def ZZ_s  : RegisterOperand<ZPR2, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 2>;
}

def ZZ_d  : RegisterOperand<ZPR2, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 2>;
}

def ZZZ_b  : RegisterOperand<ZPR3, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 3>;
}

def ZZZ_h  : RegisterOperand<ZPR3, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 3>;
}

def ZZZ_s  : RegisterOperand<ZPR3, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 3>;
}

def ZZZ_d  : RegisterOperand<ZPR3, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 3>;
}

def ZZZZ_b : RegisterOperand<ZPR4, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 4>;
}

def ZZZZ_h : RegisterOperand<ZPR4, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 4>;
}

def ZZZZ_s : RegisterOperand<ZPR4, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 4>;
}

def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 4>;
}
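// For illustration (example assembly, assuming standard SVE syntax), these
// lists appear in the structured loads/stores:
//   ld2d { z0.d, z1.d }, p0/z, [x0]
//   st4b { z4.b, z5.b, z6.b, z7.b }, p1, [x2, #4, mul vl]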

class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale,
                          bit ScaleAlwaysSame = 0b0> : AsmOperandClass {
  let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale
                         # !if(ScaleAlwaysSame, "Only", "");

  let PredicateMethod = "isSVEDataVectorRegWithShiftExtend<"
                          # RegWidth # ", AArch64::ZPRRegClassID, "
                          # "AArch64_AM::" # ShiftExtend # ", "
                          # Scale # ", "
                          # !if(ScaleAlwaysSame, "true", "false")
                          # ">";
  let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale;
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEDataVector<true, true>";
}

class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr,
                               int RegWidth, int Scale, string Suffix = "">
    : RegisterOperand<ZPR> {
  let ParserMatchClass =
    !cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale # Suffix);
  let PrintMethod = "printRegWithShiftExtend<"
                          # !if(SignExtend, "true", "false") # ", "
                          # Scale # ", "
                          # !if(IsLSL, "'x'", "'w'") # ", "
                          # !if(!eq(RegWidth, 32), "'s'", "'d'") # ">";
}

foreach RegWidth = [32, 64] in {
  // UXTW(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtUXTW8Only : ZPRExtendAsmOperand<"UXTW", RegWidth, 8, 0b1>;
  def ZPR#RegWidth#AsmOpndExtUXTW8     : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtUXTW16    : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtUXTW32    : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtUXTW64    : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>;

  def ZPR#RegWidth#ExtUXTW8Only        : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8, "Only">;
  def ZPR#RegWidth#ExtUXTW8            : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>;
  def ZPR#RegWidth#ExtUXTW16           : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>;
  def ZPR#RegWidth#ExtUXTW32           : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>;
  def ZPR#RegWidth#ExtUXTW64           : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>;

  // SXTW(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtSXTW8Only : ZPRExtendAsmOperand<"SXTW", RegWidth, 8, 0b1>;
  def ZPR#RegWidth#AsmOpndExtSXTW8     : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtSXTW16    : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtSXTW32    : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtSXTW64    : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>;

  def ZPR#RegWidth#ExtSXTW8Only        : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8, "Only">;
  def ZPR#RegWidth#ExtSXTW8            : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>;
  def ZPR#RegWidth#ExtSXTW16           : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>;
  def ZPR#RegWidth#ExtSXTW32           : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>;
  def ZPR#RegWidth#ExtSXTW64           : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 64>;

  // LSL(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtLSL8      : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtLSL16     : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtLSL32     : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtLSL64     : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
  def ZPR#RegWidth#ExtLSL8             : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
  def ZPR#RegWidth#ExtLSL16            : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
  def ZPR#RegWidth#ExtLSL32            : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
  def ZPR#RegWidth#ExtLSL64            : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;
}
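// For illustration (example assembly): these operands model the vector offsets
// of the SVE gather/scatter addressing modes, e.g.
//   ld1w { z0.s }, p0/z, [x1, z2.s, uxtw #2]   // 32-bit offsets, scaled by 4
//   ld1d { z3.d }, p1/z, [x0, z4.d, lsl #3]    // 64-bit offsets, scaled by 8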

class GPR64ShiftExtendAsmOperand <string AsmOperandName, int Scale, string RegClass> : AsmOperandClass {
  let Name = AsmOperandName # Scale;
  let PredicateMethod = "isGPR64WithShiftExtend<AArch64::"#RegClass#"RegClassID, " # Scale # ">";
  let DiagnosticType = "Invalid" # AsmOperandName # Scale;
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseGPROperand<true>";
}

class GPR64ExtendRegisterOperand<string Name, int Scale, RegisterClass RegClass> : RegisterOperand<RegClass>{
  let ParserMatchClass = !cast<AsmOperandClass>(Name);
  let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x', 0>";
}

foreach Scale = [8, 16, 32, 64, 128] in {
  def GPR64shiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64shifted", Scale, "GPR64">;
  def GPR64shifted # Scale : GPR64ExtendRegisterOperand<"GPR64shiftedAsmOpnd" # Scale, Scale, GPR64>;

  def GPR64NoXZRshiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64NoXZRshifted", Scale, "GPR64common">;
  def GPR64NoXZRshifted # Scale : GPR64ExtendRegisterOperand<"GPR64NoXZRshiftedAsmOpnd" # Scale, Scale, GPR64common>;
}
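// For illustration (example assembly): the scalar-plus-scalar SVE addressing
// forms use these shifted GPR64 operands, e.g.
//   ld1b { z0.b }, p0/z, [x0, x1]           // Scale 8:  no shift
//   ld1d { z1.d }, p0/z, [x0, x1, lsl #3]   // Scale 64: lsl #3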

// Accumulator array tiles.
def ZAQ0  : AArch64Reg<0,  "za0.q">;
def ZAQ1  : AArch64Reg<1,  "za1.q">;
def ZAQ2  : AArch64Reg<2,  "za2.q">;
def ZAQ3  : AArch64Reg<3,  "za3.q">;
def ZAQ4  : AArch64Reg<4,  "za4.q">;
def ZAQ5  : AArch64Reg<5,  "za5.q">;
def ZAQ6  : AArch64Reg<6,  "za6.q">;
def ZAQ7  : AArch64Reg<7,  "za7.q">;
def ZAQ8  : AArch64Reg<8,  "za8.q">;
def ZAQ9  : AArch64Reg<9,  "za9.q">;
def ZAQ10 : AArch64Reg<10, "za10.q">;
def ZAQ11 : AArch64Reg<11, "za11.q">;
def ZAQ12 : AArch64Reg<12, "za12.q">;
def ZAQ13 : AArch64Reg<13, "za13.q">;
def ZAQ14 : AArch64Reg<14, "za14.q">;
def ZAQ15 : AArch64Reg<15, "za15.q">;

let SubRegIndices = [zasubq0, zasubq1] in {
  def ZAD0 : AArch64Reg<0, "za0.d", [ZAQ0, ZAQ8]>;
  def ZAD1 : AArch64Reg<1, "za1.d", [ZAQ1, ZAQ9]>;
  def ZAD2 : AArch64Reg<2, "za2.d", [ZAQ2, ZAQ10]>;
  def ZAD3 : AArch64Reg<3, "za3.d", [ZAQ3, ZAQ11]>;
  def ZAD4 : AArch64Reg<4, "za4.d", [ZAQ4, ZAQ12]>;
  def ZAD5 : AArch64Reg<5, "za5.d", [ZAQ5, ZAQ13]>;
  def ZAD6 : AArch64Reg<6, "za6.d", [ZAQ6, ZAQ14]>;
  def ZAD7 : AArch64Reg<7, "za7.d", [ZAQ7, ZAQ15]>;
}

let SubRegIndices = [zasubd0, zasubd1] in {
  def ZAS0 : AArch64Reg<0, "za0.s", [ZAD0, ZAD4]>;
  def ZAS1 : AArch64Reg<1, "za1.s", [ZAD1, ZAD5]>;
  def ZAS2 : AArch64Reg<2, "za2.s", [ZAD2, ZAD6]>;
  def ZAS3 : AArch64Reg<3, "za3.s", [ZAD3, ZAD7]>;
}

let SubRegIndices = [zasubs0, zasubs1] in {
  def ZAH0 : AArch64Reg<0, "za0.h", [ZAS0, ZAS2]>;
  def ZAH1 : AArch64Reg<1, "za1.h", [ZAS1, ZAS3]>;
}

let SubRegIndices = [zasubh0, zasubh1] in {
  def ZAB0 : AArch64Reg<0, "za0.b", [ZAH0, ZAH1]>;
}

let SubRegIndices = [zasubb] in {
  def ZA : AArch64Reg<0, "za", [ZAB0]>;
}
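
// Sketch of the resulting aliasing hierarchy (derived from the sub-register
// definitions above): ZA covers ZAB0, which covers ZAH0-ZAH1, which cover
// ZAS0-ZAS3, which cover ZAD0-ZAD7, which cover ZAQ0-ZAQ15. For example,
// ZAS1 overlaps ZAD1 and ZAD5, and through them ZAQ1/ZAQ9 and ZAQ5/ZAQ13.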

// SME Register Classes

// Accumulator array
def MPR : RegisterClass<"AArch64", [untyped], 2048, (add ZA)> {
  let Size = 2048;
}

// Accumulator array as single tiles
def MPR8    : RegisterClass<"AArch64", [untyped], 2048, (add (sequence "ZAB%u", 0, 0))> {
  let Size = 2048;
}
def MPR16   : RegisterClass<"AArch64", [untyped], 1024, (add (sequence "ZAH%u", 0, 1))> {
  let Size = 1024;
}
def MPR32   : RegisterClass<"AArch64", [untyped],  512, (add (sequence "ZAS%u", 0, 3))> {
  let Size = 512;
}
def MPR64   : RegisterClass<"AArch64", [untyped],  256, (add (sequence "ZAD%u", 0, 7))> {
  let Size = 256;
}
def MPR128  : RegisterClass<"AArch64", [untyped],  128, (add (sequence "ZAQ%u", 0, 15))> {
  let Size = 128;
}

// SME Register Operands
// There are three types of SME matrix register operands:
// * Tiles:
//
//   These tiles make up the larger accumulator matrix. The tile representation
//   has an element-type suffix, e.g. za0.b or za15.q, and can be any of the
//   registers:
//          ZAQ0..ZAQ15
//          ZAD0..ZAD7
//          ZAS0..ZAS3
//          ZAH0..ZAH1
//       or ZAB0
//
// * Tile vectors:
//
//   Their representation is similar to regular tiles, but they have an extra
//   'h' or 'v' to tell how the vector at [reg+offset] is laid out in the tile,
//   horizontally or vertically.
//
//   e.g. za1h.h or za15v.q, which correspond to vectors in registers ZAH1 and
//   ZAQ15, respectively. The horizontal/vertical is more a property of the
//   instruction than of the asm operand itself or its register. The
//   distinction is only needed when parsing/printing the operand, since from
//   the compiler's perspective the whole tile is read/written.
//
// * Accumulator matrix:
//
//   This is the entire matrix accumulator register ZA (<=> ZAB0), printed as
//   'za'.
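//
//   For illustration only (the instructions chosen here are examples, not an
//   exhaustive list), the three kinds appear in assembly roughly as:
//       smopa za0.s, p0/m, p1/m, z1.b, z2.b    // tile (za0.s)
//       mova  z0.s, p0/m, za1v.s[w12, 0]       // tile vector (za1v.s)
//       ldr   za[w12, 0], [x0]                 // accumulator matrix (za)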

//
// Tiles
//

class MatrixTileAsmOperand<string RC, int EltSize> : AsmOperandClass {
  let Name = "MatrixTile" # EltSize;
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::Tile" # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixTileOperand<int EltSize, int NumBitsForTile, RegisterClass RC>
    : RegisterOperand<RC> {
  let ParserMatchClass = MatrixTileAsmOperand<!cast<string>(RC), EltSize>;
  let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
  let PrintMethod = "printMatrixTile";
}

def TileOp32  : MatrixTileOperand<32, 2, MPR32>;
def TileOp64  : MatrixTileOperand<64, 3, MPR64>;
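
// NumBitsForTile is the width of the tile-number field the decoder reads: two
// bits select among za0.s-za3.s, three bits among za0.d-za7.d. Only the 32-
// and 64-bit tile operands are defined here, used by, e.g., the SME
// outer-product instructions (illustrative, not exhaustive).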

//
// Tile vectors (horizontal and vertical)
//

class MatrixTileVectorAsmOperand<string RC, int EltSize, int IsVertical>
    : AsmOperandClass {
  let Name = "MatrixTileVector" # !if(IsVertical, "V", "H") # EltSize;
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::"
                          # !if(IsVertical, "Col", "Row") # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixTileVectorOperand<int EltSize, int NumBitsForTile,
                              RegisterClass RC, int IsVertical>
    : RegisterOperand<RC> {
  let ParserMatchClass = MatrixTileVectorAsmOperand<!cast<string>(RC), EltSize,
                                                    IsVertical>;
  let DecoderMethod = "DecodeMatrixTile<" # NumBitsForTile # ">";
  let PrintMethod = "printMatrixTileVector<" # IsVertical # ">";
}

def TileVectorOpH8   : MatrixTileVectorOperand<  8, 0, MPR8,   0>;
def TileVectorOpH16  : MatrixTileVectorOperand< 16, 1, MPR16,  0>;
def TileVectorOpH32  : MatrixTileVectorOperand< 32, 2, MPR32,  0>;
def TileVectorOpH64  : MatrixTileVectorOperand< 64, 3, MPR64,  0>;
def TileVectorOpH128 : MatrixTileVectorOperand<128, 4, MPR128, 0>;

def TileVectorOpV8   : MatrixTileVectorOperand<  8, 0, MPR8,   1>;
def TileVectorOpV16  : MatrixTileVectorOperand< 16, 1, MPR16,  1>;
def TileVectorOpV32  : MatrixTileVectorOperand< 32, 2, MPR32,  1>;
def TileVectorOpV64  : MatrixTileVectorOperand< 64, 3, MPR64,  1>;
def TileVectorOpV128 : MatrixTileVectorOperand<128, 4, MPR128, 1>;
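
// NumBitsForTile again tracks the number of tiles per element size, from 0
// bits for the single za0.b tile up to 4 bits for za0.q-za15.q. The "H" and
// "V" variants differ only in how the slice is parsed and printed
// (e.g. za1h.s vs. za1v.s); the whole tile is still read/written.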

//
// Accumulator matrix
//

class MatrixAsmOperand<string RC, int EltSize> : AsmOperandClass {
  let Name = "Matrix";
  let DiagnosticType = "Invalid" # Name;
  let ParserMethod = "tryParseMatrixRegister";
  let RenderMethod = "addMatrixOperands";
  let PredicateMethod = "isMatrixRegOperand<"
                          # "MatrixKind::Array" # ", "
                          # EltSize # ", AArch64::" # RC # "RegClassID>";
}

class MatrixOperand<RegisterClass RC, int EltSize> : RegisterOperand<RC> {
  let ParserMatchClass = MatrixAsmOperand<!cast<string>(RC), EltSize>;
  let PrintMethod = "printMatrix<" # EltSize # ">";
}

def MatrixOp : MatrixOperand<MPR, 0>;
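
// MatrixOp matches the whole accumulator and is printed simply as "za";
// passing EltSize == 0 presumably selects the suffix-less form in the
// printer (an assumption about printMatrix, not stated in this file).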

class MatrixTileListAsmOperand : AsmOperandClass {
  let Name = "MatrixTileList";
  let ParserMethod = "tryParseMatrixTileList";
  let RenderMethod = "addMatrixTileListOperands";
  let PredicateMethod = "isMatrixTileList";
}

class MatrixTileListOperand : Operand<i8> {
  let ParserMatchClass = MatrixTileListAsmOperand<>;
  let DecoderMethod = "DecodeMatrixTileListRegisterClass";
  let EncoderMethod = "EncodeMatrixTileListRegisterClass";
  let PrintMethod = "printMatrixTileList";
}

def MatrixTileList : MatrixTileListOperand<>;
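
// The tile list is carried as an 8-bit immediate, one bit per 64-bit tile
// (za0.d-za7.d). Illustrative use in the SME ZERO instruction:
//   zero {za0.d, za2.d, za5.d}
//   zero {za}                    // shorthand for the full mask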

def MatrixIndexGPR32_12_15 : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 12, 15)> {
  let DiagnosticType = "InvalidMatrixIndexGPR32_12_15";
}
def MatrixIndexGPR32Op12_15 : RegisterOperand<MatrixIndexGPR32_12_15> {
  let EncoderMethod = "encodeMatrixIndexGPR32";
}
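
// The slice-index (vector select) register of SME tile accesses is restricted
// to w12-w15, e.g. the w12 in za0h.s[w12, 0] or ldr za[w12, 0], [x0];
// encodeMatrixIndexGPR32 presumably emits the register number relative to w12.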

def SVCROperand : AsmOperandClass {
  let Name = "SVCR";
  let ParserMethod = "tryParseSVCR";
  let DiagnosticType = "Invalid" # Name;
}

def svcr_op : Operand<i32> {
  let ParserMatchClass = SVCROperand;
  let PrintMethod = "printSVCROp";
  let DecoderMethod = "DecodeSVCROp";
  let MCOperandPredicate = [{
    if (!MCOp.isImm())
      return false;
    return AArch64SVCR::lookupSVCRByEncoding(MCOp.getImm()) != nullptr;
  }];
}
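
// svcr_op names the SVCR (Streaming Vector Control Register) fields targeted
// by the SME mode-change instructions, e.g. (illustrative):
//   smstart sm    // enable streaming SVE mode (SVCR.SM)
//   smstart za    // enable the ZA storage (SVCR.ZA)
//   smstop        // disable both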