//=- AArch64RegisterInfo.td - Describe the AArch64 Registers -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//


// Base class for all AArch64 physical registers: records the hardware
// encoding, pins the "AArch64" namespace, and optionally attaches
// sub-registers and alternate (vector-syntax) names.
class AArch64Reg<bits<16> enc, string n, list<Register> subregs = [],
                 list<string> altNames = []>
        : Register<n, altNames> {
  let HWEncoding = enc;
  let Namespace = "AArch64";
  let SubRegs = subregs;
}

// Sub-register indices. The parameter of SubRegIndex<> is the bit width of
// the sub-register being named.
let Namespace = "AArch64" in {
  def sub_32 : SubRegIndex<32>;

  def bsub : SubRegIndex<8>;
  def hsub : SubRegIndex<16>;
  def ssub : SubRegIndex<32>;
  // dsub names the 64-bit D register held inside a 128-bit Q register (see
  // the Q-register definitions, which use SubRegIndices = [dsub]), so its
  // size is 64 bits. It was previously mis-declared as SubRegIndex<32>.
  def dsub : SubRegIndex<64>;
  def sube32 : SubRegIndex<32>;
  def subo32 : SubRegIndex<32>;
  def qhisub : SubRegIndex<64>;
  def qsub : SubRegIndex<64>;
  def sube64 : SubRegIndex<64>;
  def subo64 : SubRegIndex<64>;
  // SVE
  def zsub : SubRegIndex<128>;
  // Note: zsub_hi should never be used directly because it represents
  // the scalable part of the SVE vector and cannot be manipulated as a
  // subvector in the same way the lower 128bits can.
  def zsub_hi : SubRegIndex<128>;
  // Note: Code depends on these having consecutive numbers
  def dsub0 : SubRegIndex<64>;
  def dsub1 : SubRegIndex<64>;
  def dsub2 : SubRegIndex<64>;
  def dsub3 : SubRegIndex<64>;
  // Note: Code depends on these having consecutive numbers
  def qsub0 : SubRegIndex<128>;
  def qsub1 : SubRegIndex<128>;
  def qsub2 : SubRegIndex<128>;
  def qsub3 : SubRegIndex<128>;
}

// Alternate-name indices: "vreg" selects the v0..v31 spelling of a FP/SIMD
// register, "vlist1" the (empty) spelling used inside vector lists.
let Namespace = "AArch64" in {
  def vreg : RegAltNameIndex;
  def vlist1 : RegAltNameIndex;
}

//===----------------------------------------------------------------------===//
// Registers
//===----------------------------------------------------------------------===//

// 32-bit general purpose registers. DWARF numbers 0-31 per the AArch64 ABI.
def W0  : AArch64Reg<0,  "w0" >, DwarfRegNum<[0]>;
def W1  : AArch64Reg<1,  "w1" >, DwarfRegNum<[1]>;
def W2  : AArch64Reg<2,  "w2" >, DwarfRegNum<[2]>;
def W3  : AArch64Reg<3,  "w3" >, DwarfRegNum<[3]>;
def W4  : AArch64Reg<4,  "w4" >, DwarfRegNum<[4]>;
def W5  : AArch64Reg<5,  "w5" >, DwarfRegNum<[5]>;
def W6  : AArch64Reg<6,  "w6" >, DwarfRegNum<[6]>;
def W7  : AArch64Reg<7,  "w7" >, DwarfRegNum<[7]>;
def W8  : AArch64Reg<8,  "w8" >, DwarfRegNum<[8]>;
def W9  : AArch64Reg<9,  "w9" >, DwarfRegNum<[9]>;
def W10 : AArch64Reg<10, "w10">, DwarfRegNum<[10]>;
def W11 : AArch64Reg<11, "w11">, DwarfRegNum<[11]>;
def W12 : AArch64Reg<12, "w12">, DwarfRegNum<[12]>;
def W13 : AArch64Reg<13, "w13">, DwarfRegNum<[13]>;
def W14 : AArch64Reg<14, "w14">, DwarfRegNum<[14]>;
def W15 : AArch64Reg<15, "w15">, DwarfRegNum<[15]>;
def W16 : AArch64Reg<16, "w16">, DwarfRegNum<[16]>;
def W17 : AArch64Reg<17, "w17">, DwarfRegNum<[17]>;
def W18 : AArch64Reg<18, "w18">, DwarfRegNum<[18]>;
def W19 : AArch64Reg<19, "w19">, DwarfRegNum<[19]>;
def W20 : AArch64Reg<20, "w20">, DwarfRegNum<[20]>;
def W21 : AArch64Reg<21, "w21">, DwarfRegNum<[21]>;
def W22 : AArch64Reg<22, "w22">, DwarfRegNum<[22]>;
def W23 : AArch64Reg<23, "w23">, DwarfRegNum<[23]>;
def W24 : AArch64Reg<24, "w24">, DwarfRegNum<[24]>;
def W25 : AArch64Reg<25, "w25">, DwarfRegNum<[25]>;
def W26 : AArch64Reg<26, "w26">, DwarfRegNum<[26]>;
def W27 : AArch64Reg<27, "w27">, DwarfRegNum<[27]>;
def W28 : AArch64Reg<28, "w28">, DwarfRegNum<[28]>;
def W29 : AArch64Reg<29, "w29">, DwarfRegNum<[29]>;
def W30 : AArch64Reg<30, "w30">, DwarfRegNum<[30]>;
// Encoding 31 is context-dependent: WSP and WZR share it.
def WSP : AArch64Reg<31, "wsp">, DwarfRegNum<[31]>;
def WZR : AArch64Reg<31, "wzr">, DwarfRegAlias<WSP>;

// 64-bit general purpose registers; each contains the same-numbered W
// register as its low 32 bits and shares that register's DWARF number.
let SubRegIndices = [sub_32] in {
def X0  : AArch64Reg<0,  "x0",  [W0]>,  DwarfRegAlias<W0>;
def X1  : AArch64Reg<1,  "x1",  [W1]>,  DwarfRegAlias<W1>;
def X2  : AArch64Reg<2,  "x2",  [W2]>,  DwarfRegAlias<W2>;
def X3  : AArch64Reg<3,  "x3",  [W3]>,  DwarfRegAlias<W3>;
def X4  : AArch64Reg<4,  "x4",  [W4]>,  DwarfRegAlias<W4>;
def X5  : AArch64Reg<5,  "x5",  [W5]>,  DwarfRegAlias<W5>;
def X6  : AArch64Reg<6,  "x6",  [W6]>,  DwarfRegAlias<W6>;
def X7  : AArch64Reg<7,  "x7",  [W7]>,  DwarfRegAlias<W7>;
def X8  : AArch64Reg<8,  "x8",  [W8]>,  DwarfRegAlias<W8>;
def X9  : AArch64Reg<9,  "x9",  [W9]>,  DwarfRegAlias<W9>;
def X10 : AArch64Reg<10, "x10", [W10]>, DwarfRegAlias<W10>;
def X11 : AArch64Reg<11, "x11", [W11]>, DwarfRegAlias<W11>;
def X12 : AArch64Reg<12, "x12", [W12]>, DwarfRegAlias<W12>;
def X13 : AArch64Reg<13, "x13", [W13]>, DwarfRegAlias<W13>;
def X14 : AArch64Reg<14, "x14", [W14]>, DwarfRegAlias<W14>;
def X15 : AArch64Reg<15, "x15", [W15]>, DwarfRegAlias<W15>;
def X16 : AArch64Reg<16, "x16", [W16]>, DwarfRegAlias<W16>;
def X17 : AArch64Reg<17, "x17", [W17]>, DwarfRegAlias<W17>;
def X18 : AArch64Reg<18, "x18", [W18]>, DwarfRegAlias<W18>;
def X19 : AArch64Reg<19, "x19", [W19]>, DwarfRegAlias<W19>;
def X20 : AArch64Reg<20, "x20", [W20]>, DwarfRegAlias<W20>;
def X21 : AArch64Reg<21, "x21", [W21]>, DwarfRegAlias<W21>;
def X22 : AArch64Reg<22, "x22", [W22]>, DwarfRegAlias<W22>;
def X23 : AArch64Reg<23, "x23", [W23]>, DwarfRegAlias<W23>;
def X24 : AArch64Reg<24, "x24", [W24]>, DwarfRegAlias<W24>;
def X25 : AArch64Reg<25, "x25", [W25]>, DwarfRegAlias<W25>;
def X26 : AArch64Reg<26, "x26", [W26]>, DwarfRegAlias<W26>;
def X27 : AArch64Reg<27, "x27", [W27]>, DwarfRegAlias<W27>;
def X28 : AArch64Reg<28, "x28", [W28]>, DwarfRegAlias<W28>;
def FP  : AArch64Reg<29, "x29", [W29]>, DwarfRegAlias<W29>;
def LR  : AArch64Reg<30, "x30", [W30]>, DwarfRegAlias<W30>;
def SP  : AArch64Reg<31, "sp",  [WSP]>, DwarfRegAlias<WSP>;
def XZR : AArch64Reg<31, "xzr", [WZR]>, DwarfRegAlias<WSP>;
}

// Condition code register.
def NZCV  : AArch64Reg<0, "nzcv">;

// First fault status register
def FFR : AArch64Reg<0, "ffr">, DwarfRegNum<[47]>;

// GPR register classes with the intersections of GPR32/GPR32sp and
// GPR64/GPR64sp for use by the coalescer.
def GPR32common : RegisterClass<"AArch64", [i32], 32, (sequence "W%u", 0, 30)> {
  let AltOrders = [(rotl GPR32common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64common : RegisterClass<"AArch64", [i64], 64,
                                (add (sequence "X%u", 0, 28), FP, LR)> {
  let AltOrders = [(rotl GPR64common, 8)];
  let AltOrderSelect = [{ return 1; }];
}
// GPR register classes which exclude SP/WSP.
def GPR32 : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR)> {
  let AltOrders = [(rotl GPR32, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64 : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR)> {
  let AltOrders = [(rotl GPR64, 8)];
  let AltOrderSelect = [{ return 1; }];
}

// GPR register classes which include SP/WSP.
def GPR32sp : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WSP)> {
  let AltOrders = [(rotl GPR32sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}
def GPR64sp : RegisterClass<"AArch64", [i64], 64, (add GPR64common, SP)> {
  let AltOrders = [(rotl GPR64sp, 8)];
  let AltOrderSelect = [{ return 1; }];
}

// Single-register classes containing only the stack pointer.
def GPR32sponly : RegisterClass<"AArch64", [i32], 32, (add WSP)>;
def GPR64sponly : RegisterClass<"AArch64", [i64], 64, (add SP)>;

// Asm operand for a GPR64sp register written with an optional ", #0" suffix
// (as accepted by e.g. LDRAA-style syntax); parsed by a custom routine.
def GPR64spPlus0Operand : AsmOperandClass {
  let Name = "GPR64sp0";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR64<AArch64::GPR64spRegClassID>";
  let ParserMethod = "tryParseGPR64sp0Operand";
}

def GPR64sp0 : RegisterOperand<GPR64sp> {
  let ParserMatchClass = GPR64spPlus0Operand;
}

// GPR32/GPR64 but with zero-register substitution enabled.
// TODO: Roll this out to GPR32/GPR64/GPR32all/GPR64all.
def GPR32z : RegisterOperand<GPR32> {
  let GIZeroRegister = WZR;
}
def GPR64z : RegisterOperand<GPR64> {
  let GIZeroRegister = XZR;
}

// GPR register classes which include WZR/XZR AND SP/WSP. This is not a
// constraint used by any instructions, it is used as a common super-class.
def GPR32all : RegisterClass<"AArch64", [i32], 32, (add GPR32common, WZR, WSP)>;
def GPR64all : RegisterClass<"AArch64", [i64], 64, (add GPR64common, XZR, SP)>;

// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// This is for indirect tail calls to store the address of the destination.
def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X21,
                                                   X22, X23, X24, X25, X26,
                                                   X27, X28, FP, LR)>;

// Restricted set of tail call registers, for use when branch target
// enforcement is enabled. These are the only registers which can be used to
// indirectly branch (not call) to the "BTI c" instruction at the start of a
// BTI-protected function.
def rtcGPR64 : RegisterClass<"AArch64", [i64], 64, (add X16, X17)>;

// GPR register classes for post increment amount of vector load/store that
// has alternate printing when Rm=31 and prints a constant immediate value
// equal to the total number of bytes transferred.

// FIXME: TableGen *should* be able to do these itself now. There appears to be
// a bug in counting how many operands a Post-indexed MCInst should have which
// means the aliases don't trigger.
def GPR64pi1  : RegisterOperand<GPR64, "printPostIncOperand<1>">;
def GPR64pi2  : RegisterOperand<GPR64, "printPostIncOperand<2>">;
def GPR64pi3  : RegisterOperand<GPR64, "printPostIncOperand<3>">;
def GPR64pi4  : RegisterOperand<GPR64, "printPostIncOperand<4>">;
def GPR64pi6  : RegisterOperand<GPR64, "printPostIncOperand<6>">;
def GPR64pi8  : RegisterOperand<GPR64, "printPostIncOperand<8>">;
def GPR64pi12 : RegisterOperand<GPR64, "printPostIncOperand<12>">;
def GPR64pi16 : RegisterOperand<GPR64, "printPostIncOperand<16>">;
def GPR64pi24 : RegisterOperand<GPR64, "printPostIncOperand<24>">;
def GPR64pi32 : RegisterOperand<GPR64, "printPostIncOperand<32>">;
def GPR64pi48 : RegisterOperand<GPR64, "printPostIncOperand<48>">;
def GPR64pi64 : RegisterOperand<GPR64, "printPostIncOperand<64>">;

// Condition code regclass.
def CCR : RegisterClass<"AArch64", [i32], 32, (add NZCV)> {
  let CopyCost = -1;  // Don't allow copying of status registers.

  // CCR is not allocatable.
  let isAllocatable = 0;
}

//===----------------------------------------------------------------------===//
// Floating Point Scalar Registers
//===----------------------------------------------------------------------===//

// 8-bit scalar FP/SIMD registers. DWARF numbers 64-95 cover the v0-v31 bank.
def B0  : AArch64Reg<0,  "b0">, DwarfRegNum<[64]>;
def B1  : AArch64Reg<1,  "b1">, DwarfRegNum<[65]>;
def B2  : AArch64Reg<2,  "b2">, DwarfRegNum<[66]>;
def B3  : AArch64Reg<3,  "b3">, DwarfRegNum<[67]>;
def B4  : AArch64Reg<4,  "b4">, DwarfRegNum<[68]>;
def B5  : AArch64Reg<5,  "b5">, DwarfRegNum<[69]>;
def B6  : AArch64Reg<6,  "b6">, DwarfRegNum<[70]>;
def B7  : AArch64Reg<7,  "b7">, DwarfRegNum<[71]>;
def B8  : AArch64Reg<8,  "b8">, DwarfRegNum<[72]>;
def B9  : AArch64Reg<9,  "b9">, DwarfRegNum<[73]>;
def B10 : AArch64Reg<10, "b10">, DwarfRegNum<[74]>;
def B11 : AArch64Reg<11, "b11">, DwarfRegNum<[75]>;
def B12 : AArch64Reg<12, "b12">, DwarfRegNum<[76]>;
def B13 : AArch64Reg<13, "b13">, DwarfRegNum<[77]>;
def B14 : AArch64Reg<14, "b14">, DwarfRegNum<[78]>;
def B15 : AArch64Reg<15, "b15">, DwarfRegNum<[79]>;
def B16 : AArch64Reg<16, "b16">, DwarfRegNum<[80]>;
def B17 : AArch64Reg<17, "b17">, DwarfRegNum<[81]>;
def B18 : AArch64Reg<18, "b18">, DwarfRegNum<[82]>;
def B19 : AArch64Reg<19, "b19">, DwarfRegNum<[83]>;
def B20 : AArch64Reg<20, "b20">, DwarfRegNum<[84]>;
def B21 : AArch64Reg<21, "b21">, DwarfRegNum<[85]>;
def B22 : AArch64Reg<22, "b22">, DwarfRegNum<[86]>;
def B23 : AArch64Reg<23, "b23">, DwarfRegNum<[87]>;
def B24 : AArch64Reg<24, "b24">, DwarfRegNum<[88]>;
def B25 : AArch64Reg<25, "b25">, DwarfRegNum<[89]>;
def B26 : AArch64Reg<26, "b26">, DwarfRegNum<[90]>;
def B27 : AArch64Reg<27, "b27">, DwarfRegNum<[91]>;
def B28 : AArch64Reg<28, "b28">, DwarfRegNum<[92]>;
def B29 : AArch64Reg<29, "b29">, DwarfRegNum<[93]>;
def B30 : AArch64Reg<30, "b30">, DwarfRegNum<[94]>;
def B31 : AArch64Reg<31, "b31">, DwarfRegNum<[95]>;

// 16-bit scalar FP registers; each contains the same-numbered B register.
let SubRegIndices = [bsub] in {
def H0  : AArch64Reg<0,  "h0",  [B0]>,  DwarfRegAlias<B0>;
def H1  : AArch64Reg<1,  "h1",  [B1]>,  DwarfRegAlias<B1>;
def H2  : AArch64Reg<2,  "h2",  [B2]>,  DwarfRegAlias<B2>;
def H3  : AArch64Reg<3,  "h3",  [B3]>,  DwarfRegAlias<B3>;
def H4  : AArch64Reg<4,  "h4",  [B4]>,  DwarfRegAlias<B4>;
def H5  : AArch64Reg<5,  "h5",  [B5]>,  DwarfRegAlias<B5>;
def H6  : AArch64Reg<6,  "h6",  [B6]>,  DwarfRegAlias<B6>;
def H7  : AArch64Reg<7,  "h7",  [B7]>,  DwarfRegAlias<B7>;
def H8  : AArch64Reg<8,  "h8",  [B8]>,  DwarfRegAlias<B8>;
def H9  : AArch64Reg<9,  "h9",  [B9]>,  DwarfRegAlias<B9>;
def H10 : AArch64Reg<10, "h10", [B10]>, DwarfRegAlias<B10>;
def H11 : AArch64Reg<11, "h11", [B11]>, DwarfRegAlias<B11>;
def H12 : AArch64Reg<12, "h12", [B12]>, DwarfRegAlias<B12>;
def H13 : AArch64Reg<13, "h13", [B13]>, DwarfRegAlias<B13>;
def H14 : AArch64Reg<14, "h14", [B14]>, DwarfRegAlias<B14>;
def H15 : AArch64Reg<15, "h15", [B15]>, DwarfRegAlias<B15>;
def H16 : AArch64Reg<16, "h16", [B16]>, DwarfRegAlias<B16>;
def H17 : AArch64Reg<17, "h17", [B17]>, DwarfRegAlias<B17>;
def H18 : AArch64Reg<18, "h18", [B18]>, DwarfRegAlias<B18>;
def H19 : AArch64Reg<19, "h19", [B19]>, DwarfRegAlias<B19>;
def H20 : AArch64Reg<20, "h20", [B20]>, DwarfRegAlias<B20>;
def H21 : AArch64Reg<21, "h21", [B21]>, DwarfRegAlias<B21>;
def H22 : AArch64Reg<22, "h22", [B22]>, DwarfRegAlias<B22>;
def H23 : AArch64Reg<23, "h23", [B23]>, DwarfRegAlias<B23>;
def H24 : AArch64Reg<24, "h24", [B24]>, DwarfRegAlias<B24>;
def H25 : AArch64Reg<25, "h25", [B25]>, DwarfRegAlias<B25>;
def H26 : AArch64Reg<26, "h26", [B26]>, DwarfRegAlias<B26>;
def H27 : AArch64Reg<27, "h27", [B27]>, DwarfRegAlias<B27>;
def H28 : AArch64Reg<28, "h28", [B28]>, DwarfRegAlias<B28>;
def H29 : AArch64Reg<29, "h29", [B29]>, DwarfRegAlias<B29>;
def H30 : AArch64Reg<30, "h30", [B30]>, DwarfRegAlias<B30>;
def H31 : AArch64Reg<31, "h31", [B31]>, DwarfRegAlias<B31>;
}
// 32-bit scalar FP registers; each contains the same-numbered H register.
// DWARF aliases all point at the B register, since the whole v-register
// bank shares one DWARF number per register.
let SubRegIndices = [hsub] in {
def S0  : AArch64Reg<0,  "s0",  [H0]>,  DwarfRegAlias<B0>;
def S1  : AArch64Reg<1,  "s1",  [H1]>,  DwarfRegAlias<B1>;
def S2  : AArch64Reg<2,  "s2",  [H2]>,  DwarfRegAlias<B2>;
def S3  : AArch64Reg<3,  "s3",  [H3]>,  DwarfRegAlias<B3>;
def S4  : AArch64Reg<4,  "s4",  [H4]>,  DwarfRegAlias<B4>;
def S5  : AArch64Reg<5,  "s5",  [H5]>,  DwarfRegAlias<B5>;
def S6  : AArch64Reg<6,  "s6",  [H6]>,  DwarfRegAlias<B6>;
def S7  : AArch64Reg<7,  "s7",  [H7]>,  DwarfRegAlias<B7>;
def S8  : AArch64Reg<8,  "s8",  [H8]>,  DwarfRegAlias<B8>;
def S9  : AArch64Reg<9,  "s9",  [H9]>,  DwarfRegAlias<B9>;
def S10 : AArch64Reg<10, "s10", [H10]>, DwarfRegAlias<B10>;
def S11 : AArch64Reg<11, "s11", [H11]>, DwarfRegAlias<B11>;
def S12 : AArch64Reg<12, "s12", [H12]>, DwarfRegAlias<B12>;
def S13 : AArch64Reg<13, "s13", [H13]>, DwarfRegAlias<B13>;
def S14 : AArch64Reg<14, "s14", [H14]>, DwarfRegAlias<B14>;
def S15 : AArch64Reg<15, "s15", [H15]>, DwarfRegAlias<B15>;
def S16 : AArch64Reg<16, "s16", [H16]>, DwarfRegAlias<B16>;
def S17 : AArch64Reg<17, "s17", [H17]>, DwarfRegAlias<B17>;
def S18 : AArch64Reg<18, "s18", [H18]>, DwarfRegAlias<B18>;
def S19 : AArch64Reg<19, "s19", [H19]>, DwarfRegAlias<B19>;
def S20 : AArch64Reg<20, "s20", [H20]>, DwarfRegAlias<B20>;
def S21 : AArch64Reg<21, "s21", [H21]>, DwarfRegAlias<B21>;
def S22 : AArch64Reg<22, "s22", [H22]>, DwarfRegAlias<B22>;
def S23 : AArch64Reg<23, "s23", [H23]>, DwarfRegAlias<B23>;
def S24 : AArch64Reg<24, "s24", [H24]>, DwarfRegAlias<B24>;
def S25 : AArch64Reg<25, "s25", [H25]>, DwarfRegAlias<B25>;
def S26 : AArch64Reg<26, "s26", [H26]>, DwarfRegAlias<B26>;
def S27 : AArch64Reg<27, "s27", [H27]>, DwarfRegAlias<B27>;
def S28 : AArch64Reg<28, "s28", [H28]>, DwarfRegAlias<B28>;
def S29 : AArch64Reg<29, "s29", [H29]>, DwarfRegAlias<B29>;
def S30 : AArch64Reg<30, "s30", [H30]>, DwarfRegAlias<B30>;
def S31 : AArch64Reg<31, "s31", [H31]>, DwarfRegAlias<B31>;
}

// 64-bit FP/SIMD registers; each contains the same-numbered S register and
// also carries the "v<n>" alternate name used by NEON syntax.
let SubRegIndices = [ssub], RegAltNameIndices = [vreg, vlist1] in {
def D0  : AArch64Reg<0,  "d0",  [S0],  ["v0", ""]>,  DwarfRegAlias<B0>;
def D1  : AArch64Reg<1,  "d1",  [S1],  ["v1", ""]>,  DwarfRegAlias<B1>;
def D2  : AArch64Reg<2,  "d2",  [S2],  ["v2", ""]>,  DwarfRegAlias<B2>;
def D3  : AArch64Reg<3,  "d3",  [S3],  ["v3", ""]>,  DwarfRegAlias<B3>;
def D4  : AArch64Reg<4,  "d4",  [S4],  ["v4", ""]>,  DwarfRegAlias<B4>;
def D5  : AArch64Reg<5,  "d5",  [S5],  ["v5", ""]>,  DwarfRegAlias<B5>;
def D6  : AArch64Reg<6,  "d6",  [S6],  ["v6", ""]>,  DwarfRegAlias<B6>;
def D7  : AArch64Reg<7,  "d7",  [S7],  ["v7", ""]>,  DwarfRegAlias<B7>;
def D8  : AArch64Reg<8,  "d8",  [S8],  ["v8", ""]>,  DwarfRegAlias<B8>;
def D9  : AArch64Reg<9,  "d9",  [S9],  ["v9", ""]>,  DwarfRegAlias<B9>;
def D10 : AArch64Reg<10, "d10", [S10], ["v10", ""]>, DwarfRegAlias<B10>;
def D11 : AArch64Reg<11, "d11", [S11], ["v11", ""]>, DwarfRegAlias<B11>;
def D12 : AArch64Reg<12, "d12", [S12], ["v12", ""]>, DwarfRegAlias<B12>;
def D13 : AArch64Reg<13, "d13", [S13], ["v13", ""]>, DwarfRegAlias<B13>;
def D14 : AArch64Reg<14, "d14", [S14], ["v14", ""]>, DwarfRegAlias<B14>;
def D15 : AArch64Reg<15, "d15", [S15], ["v15", ""]>, DwarfRegAlias<B15>;
def D16 : AArch64Reg<16, "d16", [S16], ["v16", ""]>, DwarfRegAlias<B16>;
def D17 : AArch64Reg<17, "d17", [S17], ["v17", ""]>, DwarfRegAlias<B17>;
def D18 : AArch64Reg<18, "d18", [S18], ["v18", ""]>, DwarfRegAlias<B18>;
def D19 : AArch64Reg<19, "d19", [S19], ["v19", ""]>, DwarfRegAlias<B19>;
def D20 : AArch64Reg<20, "d20", [S20], ["v20", ""]>, DwarfRegAlias<B20>;
def D21 : AArch64Reg<21, "d21", [S21], ["v21", ""]>, DwarfRegAlias<B21>;
def D22 : AArch64Reg<22, "d22", [S22], ["v22", ""]>, DwarfRegAlias<B22>;
def D23 : AArch64Reg<23, "d23", [S23], ["v23", ""]>, DwarfRegAlias<B23>;
def D24 : AArch64Reg<24, "d24", [S24], ["v24", ""]>, DwarfRegAlias<B24>;
def D25 : AArch64Reg<25, "d25", [S25], ["v25", ""]>, DwarfRegAlias<B25>;
def D26 : AArch64Reg<26, "d26", [S26], ["v26", ""]>, DwarfRegAlias<B26>;
def D27 : AArch64Reg<27, "d27", [S27], ["v27", ""]>, DwarfRegAlias<B27>;
def D28 : AArch64Reg<28, "d28", [S28], ["v28", ""]>, DwarfRegAlias<B28>;
def D29 : AArch64Reg<29, "d29", [S29], ["v29", ""]>, DwarfRegAlias<B29>;
def D30 : AArch64Reg<30, "d30", [S30], ["v30", ""]>, DwarfRegAlias<B30>;
def D31 : AArch64Reg<31, "d31", [S31], ["v31", ""]>, DwarfRegAlias<B31>;
}

// 128-bit FP/SIMD registers; each contains the same-numbered D register as
// its low 64 bits (via dsub).
let SubRegIndices = [dsub], RegAltNameIndices = [vreg, vlist1] in {
def Q0  : AArch64Reg<0,  "q0",  [D0],  ["v0", ""]>,  DwarfRegAlias<B0>;
def Q1  : AArch64Reg<1,  "q1",  [D1],  ["v1", ""]>,  DwarfRegAlias<B1>;
def Q2  : AArch64Reg<2,  "q2",  [D2],  ["v2", ""]>,  DwarfRegAlias<B2>;
def Q3  : AArch64Reg<3,  "q3",  [D3],  ["v3", ""]>,  DwarfRegAlias<B3>;
def Q4  : AArch64Reg<4,  "q4",  [D4],  ["v4", ""]>,  DwarfRegAlias<B4>;
def Q5  : AArch64Reg<5,  "q5",  [D5],  ["v5", ""]>,  DwarfRegAlias<B5>;
def Q6  : AArch64Reg<6,  "q6",  [D6],  ["v6", ""]>,  DwarfRegAlias<B6>;
def Q7  : AArch64Reg<7,  "q7",  [D7],  ["v7", ""]>,  DwarfRegAlias<B7>;
def Q8  : AArch64Reg<8,  "q8",  [D8],  ["v8", ""]>,  DwarfRegAlias<B8>;
def Q9  : AArch64Reg<9,  "q9",  [D9],  ["v9", ""]>,  DwarfRegAlias<B9>;
def Q10 : AArch64Reg<10, "q10", [D10], ["v10", ""]>, DwarfRegAlias<B10>;
def Q11 : AArch64Reg<11, "q11", [D11], ["v11", ""]>, DwarfRegAlias<B11>;
def Q12 : AArch64Reg<12, "q12", [D12], ["v12", ""]>, DwarfRegAlias<B12>;
def Q13 : AArch64Reg<13, "q13", [D13], ["v13", ""]>, DwarfRegAlias<B13>;
def Q14 : AArch64Reg<14, "q14", [D14], ["v14", ""]>, DwarfRegAlias<B14>;
def Q15 : AArch64Reg<15, "q15", [D15], ["v15", ""]>, DwarfRegAlias<B15>;
def Q16 : AArch64Reg<16, "q16", [D16], ["v16", ""]>, DwarfRegAlias<B16>;
def Q17 : AArch64Reg<17, "q17", [D17], ["v17", ""]>, DwarfRegAlias<B17>;
def Q18 : AArch64Reg<18, "q18", [D18], ["v18", ""]>, DwarfRegAlias<B18>;
def Q19 : AArch64Reg<19, "q19", [D19], ["v19", ""]>, DwarfRegAlias<B19>;
def Q20 : AArch64Reg<20, "q20", [D20], ["v20", ""]>, DwarfRegAlias<B20>;
def Q21 : AArch64Reg<21, "q21", [D21], ["v21", ""]>, DwarfRegAlias<B21>;
def Q22 : AArch64Reg<22, "q22", [D22], ["v22", ""]>, DwarfRegAlias<B22>;
def Q23 : AArch64Reg<23, "q23", [D23], ["v23", ""]>, DwarfRegAlias<B23>;
def Q24 : AArch64Reg<24, "q24", [D24], ["v24", ""]>, DwarfRegAlias<B24>;
def Q25 : AArch64Reg<25, "q25", [D25], ["v25", ""]>, DwarfRegAlias<B25>;
def Q26 : AArch64Reg<26, "q26", [D26], ["v26", ""]>, DwarfRegAlias<B26>;
def Q27 : AArch64Reg<27, "q27", [D27], ["v27", ""]>, DwarfRegAlias<B27>;
def Q28 : AArch64Reg<28, "q28", [D28], ["v28", ""]>, DwarfRegAlias<B28>;
def Q29 : AArch64Reg<29, "q29", [D29], ["v29", ""]>, DwarfRegAlias<B29>;
def Q30 : AArch64Reg<30, "q30", [D30], ["v30", ""]>, DwarfRegAlias<B30>;
def Q31 : AArch64Reg<31, "q31", [D31], ["v31", ""]>, DwarfRegAlias<B31>;
}

// Scalar/vector FP register classes, one per access width.
def FPR8  : RegisterClass<"AArch64", [untyped], 8, (sequence "B%u", 0, 31)> {
  let Size = 8;
}
def FPR16 : RegisterClass<"AArch64", [f16], 16, (sequence "H%u", 0, 31)> {
  let Size = 16;
}
def FPR32 : RegisterClass<"AArch64", [f32, i32], 32,(sequence "S%u", 0, 31)>;
def FPR64 : RegisterClass<"AArch64", [f64, i64, v2f32, v1f64, v8i8, v4i16, v2i32,
                                      v1i64, v4f16],
                                     64, (sequence "D%u", 0, 31)>;
// We don't (yet) have an f128 legal type, so don't use that here. We
// normalize 128-bit vectors to v2f64 for arg passing and such, so use
// that here.
def FPR128 : RegisterClass<"AArch64",
                           [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, f128,
                            v8f16],
                           128, (sequence "Q%u", 0, 31)>;

// The lower 16 vector registers.  Some instructions can only take registers
// in this range.
def FPR128_lo : RegisterClass<"AArch64",
                              [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v8f16],
                              128, (trunc FPR128, 16)>;

// Pairs, triples, and quads of 64-bit vector registers.
// Consecutive D-register tuples (wrapping modulo 32, via rotl) used by the
// NEON multi-register load/store instructions.
def DSeqPairs : RegisterTuples<[dsub0, dsub1], [(rotl FPR64, 0), (rotl FPR64, 1)]>;
def DSeqTriples : RegisterTuples<[dsub0, dsub1, dsub2],
                                 [(rotl FPR64, 0), (rotl FPR64, 1),
                                  (rotl FPR64, 2)]>;
def DSeqQuads : RegisterTuples<[dsub0, dsub1, dsub2, dsub3],
                               [(rotl FPR64, 0), (rotl FPR64, 1),
                                (rotl FPR64, 2), (rotl FPR64, 3)]>;
// Tuple register classes; Size is the total width of the tuple in bits.
def DD : RegisterClass<"AArch64", [untyped], 64, (add DSeqPairs)> {
  let Size = 128;
}
def DDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqTriples)> {
  let Size = 192;
}
def DDDD : RegisterClass<"AArch64", [untyped], 64, (add DSeqQuads)> {
  let Size = 256;
}

// Pairs, triples, and quads of 128-bit vector registers.
def QSeqPairs : RegisterTuples<[qsub0, qsub1], [(rotl FPR128, 0), (rotl FPR128, 1)]>;
def QSeqTriples : RegisterTuples<[qsub0, qsub1, qsub2],
                                 [(rotl FPR128, 0), (rotl FPR128, 1),
                                  (rotl FPR128, 2)]>;
def QSeqQuads : RegisterTuples<[qsub0, qsub1, qsub2, qsub3],
                               [(rotl FPR128, 0), (rotl FPR128, 1),
                                (rotl FPR128, 2), (rotl FPR128, 3)]>;
def QQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqPairs)> {
  let Size = 256;
}
def QQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqTriples)> {
  let Size = 384;
}
def QQQQ : RegisterClass<"AArch64", [untyped], 128, (add QSeqQuads)> {
  let Size = 512;
}


// Vector operand versions of the FP registers. Alternate name printing and
// assembler matching.
// Asm operand classes for NEON vector registers written in v-register form.
def VectorReg64AsmOperand : AsmOperandClass {
  let Name = "VectorReg64";
  let PredicateMethod = "isNeonVectorReg";
}
def VectorReg128AsmOperand : AsmOperandClass {
  let Name = "VectorReg128";
  let PredicateMethod = "isNeonVectorReg";
}

def V64 : RegisterOperand<FPR64, "printVRegOperand"> {
  let ParserMatchClass = VectorReg64AsmOperand;
}

def V128 : RegisterOperand<FPR128, "printVRegOperand"> {
  let ParserMatchClass = VectorReg128AsmOperand;
}

// Restricted to v0-v15 (FPR128_lo), for instructions that only encode 4 bits
// of register number.
def VectorRegLoAsmOperand : AsmOperandClass {
  let Name = "VectorRegLo";
  let PredicateMethod = "isNeonVectorRegLo";
}
def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
  let ParserMatchClass = VectorRegLoAsmOperand;
}

// Asm operand class for an explicitly-typed vector list such as
// "{ v0.8b, v1.8b }": count registers, each with the given lane count and
// element size (in bits).
class TypedVecListAsmOperand<int count, string vecty, int lanes, int eltsize>
    : AsmOperandClass {
  let Name       = "TypedVectorList" # count # "_" # lanes # eltsize;

  let PredicateMethod
        = "isTypedVectorList<RegKind::NeonVector, " # count # ", " # lanes # ", " # eltsize # ">";
  let RenderMethod = "addVectorListOperands<" # vecty # ", " # count # ">";
}

// Matching register operand that prints the list with its type suffix.
class TypedVecListRegOperand<RegisterClass Reg, int lanes, string eltsize>
    : RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
                                                   # eltsize # "'>">;

// Generates every operand flavour (implicitly typed plus all explicit
// lane/element-size combinations) for a vector list of `count` registers.
multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
  // With implicit types (probably on instruction instead). E.g. { v0, v1 }
  def _64AsmOperand : AsmOperandClass {
    let Name = NAME # "64";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_DReg, " # count # ">";
  }

  def "64" : RegisterOperand<Reg64, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_64AsmOperand");
  }

  def _128AsmOperand : AsmOperandClass {
    let Name = NAME # "128";
    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
    let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_QReg, " # count # ">";
  }

  def "128" : RegisterOperand<Reg128, "printImplicitlyTypedVectorList"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_128AsmOperand");
  }

  // 64-bit register lists with explicit type.

  // { v0.8b, v1.8b }
  def _8bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 8, 8>;
  def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
  }

  // { v0.4h, v1.4h }
  def _4hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 4, 16>;
  def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
  }

  // { v0.2s, v1.2s }
  def _2sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 2, 32>;
  def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
  }

  // { v0.1d, v1.1d }
  def _1dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_DReg", 1, 64>;
  def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
  }

  // 128-bit register lists with explicit type

  // { v0.16b, v1.16b }
  def _16bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 16, 8>;
  def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
  }

  // { v0.8h, v1.8h }
  def _8hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 8, 16>;
  def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
  }

  // { v0.4s, v1.4s }
  def _4sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 4, 32>;
  def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
  }

  // { v0.2d, v1.2d }
  def _2dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 2, 64>;
  def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
  }

  // Lane-count 0 forms: the list is written without a lane count, e.g. for
  // indexed loads/stores.

  // { v0.b, v1.b }
  def _bAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 8>;
  def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
  }

  // { v0.h, v1.h }
  def _hAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 16>;
  def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
  }

  // { v0.s, v1.s }
  def _sAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 32>;
  def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
  }

  // { v0.d, v1.d }
  def _dAsmOperand : TypedVecListAsmOperand<count, "AArch64Operand::VecListIdx_QReg", 0, 64>;
  def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
  }


}

defm VecListOne   : VectorList<1, FPR64, FPR128>;
defm VecListTwo   : VectorList<2, DD,    QQ>;
defm VecListThree : VectorList<3, DDD,   QQQ>;
defm VecListFour  : VectorList<4, DDDD,  QQQQ>;

// Asm operand class for a scalar FP register of the named register class.
// NOTE(review): PredicateMethod instantiates the isGPR64<> parser template
// with an FPR register class ID — verify that template accepts FPR class IDs
// in AArch64AsmParser.
class FPRAsmOperand<string RC> : AsmOperandClass {
  let Name = "FPRAsmOperand" # RC;
  let PredicateMethod = "isGPR64<AArch64::" # RC # "RegClassID>";
  let RenderMethod = "addRegOperands";
}

// Register operand versions of the scalar FP registers.
def FPR8Op : RegisterOperand<FPR8, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR8">;
}

def FPR16Op : RegisterOperand<FPR16, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR16">;
}

def FPR32Op : RegisterOperand<FPR32, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR32">;
}

def FPR64Op : RegisterOperand<FPR64, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR64">;
}

def FPR128Op : RegisterOperand<FPR128, "printOperand"> {
  let ParserMatchClass = FPRAsmOperand<"FPR128">;
}

//===----------------------------------------------------------------------===//
// ARMv8.1a atomic CASP register operands


// Even/odd register pairs (decimate selects every second register) as
// required by the CASP family of instructions.
def WSeqPairs : RegisterTuples<[sube32, subo32],
                               [(decimate (rotl GPR32, 0), 2),
                                (decimate (rotl GPR32, 1), 2)]>;
def XSeqPairs : RegisterTuples<[sube64, subo64],
                               [(decimate (rotl GPR64, 0), 2),
                                (decimate (rotl GPR64, 1), 2)]>;

def WSeqPairsClass : RegisterClass<"AArch64", [untyped], 32,
                                   (add WSeqPairs)>{
  let Size = 64;
}
def XSeqPairsClass : RegisterClass<"AArch64", [untyped], 64,
                                   (add XSeqPairs)>{
  let Size = 128;
}


let RenderMethod = "addRegOperands", ParserMethod="tryParseGPRSeqPair" in {
  def WSeqPairsAsmOperandClass : AsmOperandClass { let Name = "WSeqPair"; }
  def XSeqPairsAsmOperandClass : AsmOperandClass { let Name = "XSeqPair"; }
}

def WSeqPairClassOperand :
    RegisterOperand<WSeqPairsClass, "printGPRSeqPairsClassOperand<32>"> {
  let ParserMatchClass = WSeqPairsAsmOperandClass;
}
def XSeqPairClassOperand :
    RegisterOperand<XSeqPairsClass, "printGPRSeqPairsClassOperand<64>"> {
  let ParserMatchClass = XSeqPairsAsmOperandClass;
}


//===----- END: v8.1a atomic CASP register operands -----------------------===//

// SVE predicate registers. DWARF numbers 48-63 per the SVE DWARF extension.
def P0  : AArch64Reg<0,  "p0">, DwarfRegNum<[48]>;
def P1  : AArch64Reg<1,  "p1">, DwarfRegNum<[49]>;
def P2  : AArch64Reg<2,  "p2">, DwarfRegNum<[50]>;
def P3  : AArch64Reg<3,  "p3">, DwarfRegNum<[51]>;
def P4  : AArch64Reg<4,  "p4">, DwarfRegNum<[52]>;
def P5  : AArch64Reg<5,  "p5">, DwarfRegNum<[53]>;
def P6  : AArch64Reg<6,  "p6">, DwarfRegNum<[54]>;
def P7  : AArch64Reg<7,  "p7">, DwarfRegNum<[55]>;
def P8  : AArch64Reg<8,  "p8">, DwarfRegNum<[56]>;
def P9  : AArch64Reg<9,  "p9">, DwarfRegNum<[57]>;
def P10 : AArch64Reg<10, "p10">, DwarfRegNum<[58]>;
def P11 : AArch64Reg<11, "p11">, DwarfRegNum<[59]>;
def P12 : AArch64Reg<12, "p12">, DwarfRegNum<[60]>;
def P13 : AArch64Reg<13, "p13">, DwarfRegNum<[61]>;
def P14 : AArch64Reg<14, "p14">, DwarfRegNum<[62]>;
def P15 : AArch64Reg<15, "p15">, DwarfRegNum<[63]>;

// The part of SVE registers that don't overlap Neon registers.
// These are only used as part of clobber lists.
// Z*_HI registers model only the scalable upper part of each SVE vector
// (everything above the low 128 Neon bits); see the zsub_hi note above.
def Z0_HI : AArch64Reg<0, "z0_hi">;
def Z1_HI : AArch64Reg<1, "z1_hi">;
def Z2_HI : AArch64Reg<2, "z2_hi">;
def Z3_HI : AArch64Reg<3, "z3_hi">;
def Z4_HI : AArch64Reg<4, "z4_hi">;
def Z5_HI : AArch64Reg<5, "z5_hi">;
def Z6_HI : AArch64Reg<6, "z6_hi">;
def Z7_HI : AArch64Reg<7, "z7_hi">;
def Z8_HI : AArch64Reg<8, "z8_hi">;
def Z9_HI : AArch64Reg<9, "z9_hi">;
def Z10_HI : AArch64Reg<10, "z10_hi">;
def Z11_HI : AArch64Reg<11, "z11_hi">;
def Z12_HI : AArch64Reg<12, "z12_hi">;
def Z13_HI : AArch64Reg<13, "z13_hi">;
def Z14_HI : AArch64Reg<14, "z14_hi">;
def Z15_HI : AArch64Reg<15, "z15_hi">;
def Z16_HI : AArch64Reg<16, "z16_hi">;
def Z17_HI : AArch64Reg<17, "z17_hi">;
def Z18_HI : AArch64Reg<18, "z18_hi">;
def Z19_HI : AArch64Reg<19, "z19_hi">;
def Z20_HI : AArch64Reg<20, "z20_hi">;
def Z21_HI : AArch64Reg<21, "z21_hi">;
def Z22_HI : AArch64Reg<22, "z22_hi">;
def Z23_HI : AArch64Reg<23, "z23_hi">;
def Z24_HI : AArch64Reg<24, "z24_hi">;
def Z25_HI : AArch64Reg<25, "z25_hi">;
def Z26_HI : AArch64Reg<26, "z26_hi">;
def Z27_HI : AArch64Reg<27, "z27_hi">;
def Z28_HI : AArch64Reg<28, "z28_hi">;
def Z29_HI : AArch64Reg<29, "z29_hi">;
def Z30_HI : AArch64Reg<30, "z30_hi">;
def Z31_HI : AArch64Reg<31, "z31_hi">;

// SVE variable-size vector registers.
// Each Zn is composed of the Neon Qn (low 128 bits, zsub) plus the scalable
// Zn_HI part (zsub_hi). DWARF register numbers are 96-127.
let SubRegIndices = [zsub,zsub_hi] in {
def Z0 : AArch64Reg<0, "z0", [Q0, Z0_HI]>, DwarfRegNum<[96]>;
def Z1 : AArch64Reg<1, "z1", [Q1, Z1_HI]>, DwarfRegNum<[97]>;
def Z2 : AArch64Reg<2, "z2", [Q2, Z2_HI]>, DwarfRegNum<[98]>;
def Z3 : AArch64Reg<3, "z3", [Q3, Z3_HI]>, DwarfRegNum<[99]>;
def Z4 : AArch64Reg<4, "z4", [Q4, Z4_HI]>, DwarfRegNum<[100]>;
def Z5 : AArch64Reg<5, "z5", [Q5, Z5_HI]>, DwarfRegNum<[101]>;
def Z6 : AArch64Reg<6, "z6", [Q6, Z6_HI]>, DwarfRegNum<[102]>;
def Z7 : AArch64Reg<7, "z7", [Q7, Z7_HI]>, DwarfRegNum<[103]>;
def Z8 : AArch64Reg<8, "z8", [Q8, Z8_HI]>, DwarfRegNum<[104]>;
def Z9 : AArch64Reg<9, "z9", [Q9, Z9_HI]>, DwarfRegNum<[105]>;
def Z10 : AArch64Reg<10, "z10", [Q10, Z10_HI]>, DwarfRegNum<[106]>;
def Z11 : AArch64Reg<11, "z11", [Q11, Z11_HI]>, DwarfRegNum<[107]>;
def Z12 : AArch64Reg<12, "z12", [Q12, Z12_HI]>, DwarfRegNum<[108]>;
def Z13 : AArch64Reg<13, "z13", [Q13, Z13_HI]>, DwarfRegNum<[109]>;
def Z14 : AArch64Reg<14, "z14", [Q14, Z14_HI]>, DwarfRegNum<[110]>;
def Z15 : AArch64Reg<15, "z15", [Q15, Z15_HI]>, DwarfRegNum<[111]>;
def Z16 : AArch64Reg<16, "z16", [Q16, Z16_HI]>, DwarfRegNum<[112]>;
def Z17 : AArch64Reg<17, "z17", [Q17, Z17_HI]>, DwarfRegNum<[113]>;
def Z18 : AArch64Reg<18, "z18", [Q18, Z18_HI]>, DwarfRegNum<[114]>;
def Z19 : AArch64Reg<19, "z19", [Q19, Z19_HI]>, DwarfRegNum<[115]>;
def Z20 : AArch64Reg<20, "z20", [Q20, Z20_HI]>, DwarfRegNum<[116]>;
def Z21 : AArch64Reg<21, "z21", [Q21, Z21_HI]>, DwarfRegNum<[117]>;
def Z22 : AArch64Reg<22, "z22", [Q22, Z22_HI]>, DwarfRegNum<[118]>;
def Z23 : AArch64Reg<23, "z23", [Q23, Z23_HI]>, DwarfRegNum<[119]>;
def Z24 : AArch64Reg<24, "z24", [Q24, Z24_HI]>, DwarfRegNum<[120]>;
def Z25 : AArch64Reg<25, "z25", [Q25, Z25_HI]>, DwarfRegNum<[121]>;
def Z26 : AArch64Reg<26, "z26", [Q26, Z26_HI]>, DwarfRegNum<[122]>;
def Z27 : AArch64Reg<27, "z27", [Q27, Z27_HI]>, DwarfRegNum<[123]>;
def Z28 : AArch64Reg<28, "z28", [Q28, Z28_HI]>, DwarfRegNum<[124]>;
def Z29 : AArch64Reg<29, "z29", [Q29, Z29_HI]>, DwarfRegNum<[125]>;
def Z30 : AArch64Reg<30, "z30", [Q30, Z30_HI]>, DwarfRegNum<[126]>;
def Z31 : AArch64Reg<31, "z31", [Q31, Z31_HI]>, DwarfRegNum<[127]>;
}

// Enum describing the element size for destructive
// operations.
777class ElementSizeEnum<bits<3> val> { 778 bits<3> Value = val; 779} 780 781def ElementSizeNone : ElementSizeEnum<0>; 782def ElementSizeB : ElementSizeEnum<1>; 783def ElementSizeH : ElementSizeEnum<2>; 784def ElementSizeS : ElementSizeEnum<3>; 785def ElementSizeD : ElementSizeEnum<4>; 786def ElementSizeQ : ElementSizeEnum<5>; // Unused 787 788class SVERegOp <string Suffix, AsmOperandClass C, 789 ElementSizeEnum Size, 790 RegisterClass RC> : RegisterOperand<RC> { 791 ElementSizeEnum ElementSize; 792 793 let ElementSize = Size; 794 let PrintMethod = !if(!eq(Suffix, ""), 795 "printSVERegOp<>", 796 "printSVERegOp<'" # Suffix # "'>"); 797 let ParserMatchClass = C; 798} 799 800class PPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size, 801 RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {} 802class ZPRRegOp <string Suffix, AsmOperandClass C, ElementSizeEnum Size, 803 RegisterClass RC> : SVERegOp<Suffix, C, Size, RC> {} 804 805//****************************************************************************** 806 807// SVE predicate register classes. 808class PPRClass<int lastreg> : RegisterClass< 809 "AArch64", 810 [ nxv16i1, nxv8i1, nxv4i1, nxv2i1 ], 16, 811 (sequence "P%u", 0, lastreg)> { 812 let Size = 16; 813} 814 815def PPR : PPRClass<15>; 816def PPR_3b : PPRClass<7>; // Restricted 3 bit SVE predicate register class. 
// Asm operand class for an SVE predicate register of a given element width.
// Width of 0 means "any width" (the predicate method takes 0 as wildcard);
// the DiagnosticType feeds the InvalidSVE*Reg parser diagnostics.
class PPRAsmOperand <string name, string RegClass, int Width>: AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEPredicateVectorRegOfWidth<"
                            # Width # ", " # "AArch64::" # RegClass # "RegClassID>";
  let DiagnosticType = "InvalidSVE" # name # "Reg";
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEPredicateVector";
}

def PPRAsmOpAny : PPRAsmOperand<"PredicateAny", "PPR", 0>;
def PPRAsmOp8   : PPRAsmOperand<"PredicateB", "PPR", 8>;
def PPRAsmOp16  : PPRAsmOperand<"PredicateH", "PPR", 16>;
def PPRAsmOp32  : PPRAsmOperand<"PredicateS", "PPR", 32>;
def PPRAsmOp64  : PPRAsmOperand<"PredicateD", "PPR", 64>;

def PPRAny : PPRRegOp<"", PPRAsmOpAny, ElementSizeNone, PPR>;
def PPR8   : PPRRegOp<"b", PPRAsmOp8, ElementSizeB, PPR>;
def PPR16  : PPRRegOp<"h", PPRAsmOp16, ElementSizeH, PPR>;
def PPR32  : PPRRegOp<"s", PPRAsmOp32, ElementSizeS, PPR>;
def PPR64  : PPRRegOp<"d", PPRAsmOp64, ElementSizeD, PPR>;

// Restricted (P0-P7) predicate operands, per element width.
def PPRAsmOp3bAny : PPRAsmOperand<"Predicate3bAny", "PPR_3b", 0>;
def PPRAsmOp3b8   : PPRAsmOperand<"Predicate3bB", "PPR_3b", 8>;
def PPRAsmOp3b16  : PPRAsmOperand<"Predicate3bH", "PPR_3b", 16>;
def PPRAsmOp3b32  : PPRAsmOperand<"Predicate3bS", "PPR_3b", 32>;
def PPRAsmOp3b64  : PPRAsmOperand<"Predicate3bD", "PPR_3b", 64>;

def PPR3bAny : PPRRegOp<"", PPRAsmOp3bAny, ElementSizeNone, PPR_3b>;
def PPR3b8   : PPRRegOp<"b", PPRAsmOp3b8, ElementSizeB, PPR_3b>;
def PPR3b16  : PPRRegOp<"h", PPRAsmOp3b16, ElementSizeH, PPR_3b>;
def PPR3b32  : PPRRegOp<"s", PPRAsmOp3b32, ElementSizeS, PPR_3b>;
def PPR3b64  : PPRRegOp<"d", PPRAsmOp3b64, ElementSizeD, PPR_3b>;

//******************************************************************************

// SVE vector register class (full Z0-Z31).
def ZPR : RegisterClass<"AArch64",
                        [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
                         nxv2f16, nxv4f16, nxv8f16,
                         nxv1f32, nxv2f32, nxv4f32,
                         nxv1f64, nxv2f64],
                        128, (sequence "Z%u", 0, 31)> {
  let Size = 128;
}

// SVE restricted 4 bit scalable vector register class (Z0-Z15).
def ZPR_4b : RegisterClass<"AArch64",
                           [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
                            nxv2f16, nxv4f16, nxv8f16,
                            nxv1f32, nxv2f32, nxv4f32,
                            nxv1f64, nxv2f64],
                           128, (sequence "Z%u", 0, 15)> {
  let Size = 128;
}

// SVE restricted 3 bit scalable vector register class (Z0-Z7).
def ZPR_3b : RegisterClass<"AArch64",
                           [nxv16i8, nxv8i16, nxv4i32, nxv2i64,
                            nxv2f16, nxv4f16, nxv8f16,
                            nxv1f32, nxv2f32, nxv4f32,
                            nxv1f64, nxv2f64],
                           128, (sequence "Z%u", 0, 7)> {
  let Size = 128;
}

// Asm operand class for an SVE data vector register.
//   Width          - element width in bits; 0 means "any" and also makes the
//                    parser not require an element-size suffix.
//   RegClassSuffix - "" / "_3b" / "_4b", selecting full or restricted ZPR.
class ZPRAsmOperand<string name, int Width, string RegClassSuffix = "">
    : AsmOperandClass {
  let Name = "SVE" # name # "Reg";
  let PredicateMethod = "isSVEDataVectorRegOfWidth<"
                            # Width # ", AArch64::ZPR"
                            # RegClassSuffix # "RegClassID>";
  let RenderMethod = "addRegOperands";
  let DiagnosticType = "InvalidZPR" # RegClassSuffix # Width;
  let ParserMethod = "tryParseSVEDataVector<false, "
                         # !if(!eq(Width, 0), "false", "true") # ">";
}

def ZPRAsmOpAny : ZPRAsmOperand<"VectorAny", 0>;
def ZPRAsmOp8   : ZPRAsmOperand<"VectorB", 8>;
def ZPRAsmOp16  : ZPRAsmOperand<"VectorH", 16>;
def ZPRAsmOp32  : ZPRAsmOperand<"VectorS", 32>;
def ZPRAsmOp64  : ZPRAsmOperand<"VectorD", 64>;
def ZPRAsmOp128 : ZPRAsmOperand<"VectorQ", 128>;

def ZPRAny  : ZPRRegOp<"", ZPRAsmOpAny, ElementSizeNone, ZPR>;
def ZPR8    : ZPRRegOp<"b", ZPRAsmOp8, ElementSizeB, ZPR>;
def ZPR16   : ZPRRegOp<"h", ZPRAsmOp16, ElementSizeH, ZPR>;
def ZPR32   : ZPRRegOp<"s", ZPRAsmOp32, ElementSizeS, ZPR>;
def ZPR64   : ZPRRegOp<"d", ZPRAsmOp64, ElementSizeD, ZPR>;
def ZPR128  : ZPRRegOp<"q", ZPRAsmOp128, ElementSizeQ, ZPR>;

// Restricted Z0-Z7 data vector operands.
def ZPRAsmOp3b8  : ZPRAsmOperand<"Vector3bB", 8, "_3b">;
def ZPRAsmOp3b16 : ZPRAsmOperand<"Vector3bH", 16, "_3b">;
def ZPRAsmOp3b32 : ZPRAsmOperand<"Vector3bS", 32, "_3b">;

def ZPR3b8  : ZPRRegOp<"b", ZPRAsmOp3b8, ElementSizeB, ZPR_3b>;
def ZPR3b16 : ZPRRegOp<"h", ZPRAsmOp3b16, ElementSizeH, ZPR_3b>;
def ZPR3b32 : ZPRRegOp<"s", ZPRAsmOp3b32, ElementSizeS, ZPR_3b>;

// Restricted Z0-Z15 data vector operands.
def ZPRAsmOp4b16 : ZPRAsmOperand<"Vector4bH", 16, "_4b">;
def ZPRAsmOp4b32 : ZPRAsmOperand<"Vector4bS", 32, "_4b">;
def ZPRAsmOp4b64 : ZPRAsmOperand<"Vector4bD", 64, "_4b">;

def ZPR4b16 : ZPRRegOp<"h", ZPRAsmOp4b16, ElementSizeH, ZPR_4b>;
def ZPR4b32 : ZPRRegOp<"s", ZPRAsmOp4b32, ElementSizeS, ZPR_4b>;
def ZPR4b64 : ZPRRegOp<"d", ZPRAsmOp4b64, ElementSizeD, ZPR_4b>;

// Asm operand class matching a scalar FP register spelled where a Z register
// is expected; rendered via addFPRasZPRRegOperands<Width>.
class FPRasZPR<int Width> : AsmOperandClass{
  let Name = "FPR" # Width # "asZPR";
  let PredicateMethod = "isFPRasZPR<AArch64::FPR" # Width # "RegClassID>";
  let RenderMethod = "addFPRasZPRRegOperands<" # Width # ">";
}

// ZPR operand printed back as the corresponding FPR of the given width.
class FPRasZPROperand<int Width> : RegisterOperand<ZPR> {
  let ParserMatchClass = FPRasZPR<Width>;
  let PrintMethod = "printZPRasFPR<" # Width # ">";
}

def FPR8asZPR   : FPRasZPROperand<8>;
def FPR16asZPR  : FPRasZPROperand<16>;
def FPR32asZPR  : FPRasZPROperand<32>;
def FPR64asZPR  : FPRasZPROperand<64>;
def FPR128asZPR : FPRasZPROperand<128>;

// Sub-register indices for elements of SVE vector tuples.
// NOTE(review): the -1 offset presumably marks the offset as unknown/variable
// (the registers are scalable) - confirm against SubRegIndex docs.
let Namespace = "AArch64" in {
  def zsub0 : SubRegIndex<128, -1>;
  def zsub1 : SubRegIndex<128, -1>;
  def zsub2 : SubRegIndex<128, -1>;
  def zsub3 : SubRegIndex<128, -1>;
}

// Pairs, triples, and quads of SVE vector registers.
// Overlapping consecutive tuples: tuple i starts at Zi, so e.g. pairs are
// (Z0,Z1), (Z1,Z2), ...; rotl wraps around, giving a final (Z31,Z0) pair.
def ZSeqPairs : RegisterTuples<[zsub0, zsub1], [(rotl ZPR, 0), (rotl ZPR, 1)]>;
def ZSeqTriples : RegisterTuples<[zsub0, zsub1, zsub2], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2)]>;
def ZSeqQuads : RegisterTuples<[zsub0, zsub1, zsub2, zsub3], [(rotl ZPR, 0), (rotl ZPR, 1), (rotl ZPR, 2), (rotl ZPR, 3)]>;

// Register classes for the tuples; Size is NumRegs x 128 bits.
def ZPR2 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqPairs)> {
  let Size = 256;
}
def ZPR3 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqTriples)> {
  let Size = 384;
}
def ZPR4 : RegisterClass<"AArch64", [untyped], 128, (add ZSeqQuads)> {
  let Size = 512;
}

// Asm operand class for an SVE vector list of NumRegs registers with the
// given element width, e.g. "{ z0.s, z1.s }".
class ZPRVectorList<int ElementWidth, int NumRegs> : AsmOperandClass {
  let Name = "SVEVectorList" # NumRegs # ElementWidth;
  let ParserMethod = "tryParseVectorList<RegKind::SVEDataVector>";
  let PredicateMethod =
      "isTypedVectorList<RegKind::SVEDataVector, " #NumRegs #", 0, " #ElementWidth #">";
  let RenderMethod = "addVectorListOperands<AArch64Operand::VecListIdx_ZReg, " # NumRegs # ">";
}

// Single-register SVE vector lists, per element size.
def Z_b : RegisterOperand<ZPR, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 1>;
}

def Z_h : RegisterOperand<ZPR, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 1>;
}

def Z_s : RegisterOperand<ZPR, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 1>;
}

def Z_d : RegisterOperand<ZPR, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 1>;
}

// Two-register SVE vector lists.
def ZZ_b : RegisterOperand<ZPR2, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 2>;
}

def ZZ_h : RegisterOperand<ZPR2, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 2>;
}

def ZZ_s : RegisterOperand<ZPR2, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 2>;
}

def ZZ_d : RegisterOperand<ZPR2, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 2>;
}

// Three-register SVE vector lists.
def ZZZ_b : RegisterOperand<ZPR3, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 3>;
}

def ZZZ_h : RegisterOperand<ZPR3, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 3>;
}

def ZZZ_s : RegisterOperand<ZPR3, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 3>;
}

def ZZZ_d : RegisterOperand<ZPR3, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 3>;
}

// Four-register SVE vector lists.
def ZZZZ_b : RegisterOperand<ZPR4, "printTypedVectorList<0,'b'>"> {
  let ParserMatchClass = ZPRVectorList<8, 4>;
}

def ZZZZ_h : RegisterOperand<ZPR4, "printTypedVectorList<0,'h'>"> {
  let ParserMatchClass = ZPRVectorList<16, 4>;
}

def ZZZZ_s : RegisterOperand<ZPR4, "printTypedVectorList<0,'s'>"> {
  let ParserMatchClass = ZPRVectorList<32, 4>;
}

def ZZZZ_d : RegisterOperand<ZPR4, "printTypedVectorList<0,'d'>"> {
  let ParserMatchClass = ZPRVectorList<64, 4>;
}

// Asm operand class for a Z register used as a shifted/extended offset
// (UXTW/SXTW/LSL) with the given scale. When ScaleAlwaysSame is set the
// operand name gets an "Only" suffix and "true" is passed through to the
// predicate (the parser-side meaning is defined by
// isSVEDataVectorRegWithShiftExtend).
class ZPRExtendAsmOperand<string ShiftExtend, int RegWidth, int Scale,
                          bit ScaleAlwaysSame = 0b0> : AsmOperandClass {
  let Name = "ZPRExtend" # ShiftExtend # RegWidth # Scale
                         # !if(ScaleAlwaysSame, "Only", "");

  let PredicateMethod = "isSVEDataVectorRegWithShiftExtend<"
                            # RegWidth # ", AArch64::ZPRRegClassID, "
                            # "AArch64_AM::" # ShiftExtend # ", "
                            # Scale # ", "
                            # !if(ScaleAlwaysSame, "true", "false")
                            # ">";
  let DiagnosticType = "InvalidZPR" # RegWidth # ShiftExtend # Scale;
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseSVEDataVector<true, true>";
}

// Register operand pairing a ZPRExtendAsmOperand (looked up by name) with the
// matching printRegWithShiftExtend printer instantiation.
class ZPRExtendRegisterOperand<bit SignExtend, bit IsLSL, string Repr,
                               int RegWidth, int Scale, string Suffix = "">
    : RegisterOperand<ZPR> {
  let ParserMatchClass =
      !cast<AsmOperandClass>("ZPR" # RegWidth # "AsmOpndExt" # Repr # Scale # Suffix);
  let PrintMethod = "printRegWithShiftExtend<"
                        # !if(SignExtend, "true", "false") # ", "
                        # Scale # ", "
                        # !if(IsLSL, "'x'", "'w'") # ", "
                        # !if(!eq(RegWidth, 32), "'s'", "'d'") # ">";
}

// Instantiate the extend operands for 32- and 64-bit element widths.
foreach RegWidth = [32, 64] in {
  // UXTW(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtUXTW8Only : ZPRExtendAsmOperand<"UXTW", RegWidth, 8, 0b1>;
  def ZPR#RegWidth#AsmOpndExtUXTW8     : ZPRExtendAsmOperand<"UXTW", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtUXTW16    : ZPRExtendAsmOperand<"UXTW", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtUXTW32    : ZPRExtendAsmOperand<"UXTW", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtUXTW64    : ZPRExtendAsmOperand<"UXTW", RegWidth, 64>;

  def ZPR#RegWidth#ExtUXTW8Only : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8, "Only">;
  def ZPR#RegWidth#ExtUXTW8     : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 8>;
  def ZPR#RegWidth#ExtUXTW16    : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 16>;
  def ZPR#RegWidth#ExtUXTW32    : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 32>;
  def ZPR#RegWidth#ExtUXTW64    : ZPRExtendRegisterOperand<0b0, 0b0, "UXTW", RegWidth, 64>;

  // SXTW(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtSXTW8Only : ZPRExtendAsmOperand<"SXTW", RegWidth, 8, 0b1>;
  def ZPR#RegWidth#AsmOpndExtSXTW8     : ZPRExtendAsmOperand<"SXTW", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtSXTW16    : ZPRExtendAsmOperand<"SXTW", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtSXTW32    : ZPRExtendAsmOperand<"SXTW", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtSXTW64    : ZPRExtendAsmOperand<"SXTW", RegWidth, 64>;

  def ZPR#RegWidth#ExtSXTW8Only : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8, "Only">;
  def ZPR#RegWidth#ExtSXTW8     : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 8>;
  def ZPR#RegWidth#ExtSXTW16    : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 16>;
  def ZPR#RegWidth#ExtSXTW32    : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 32>;
  def ZPR#RegWidth#ExtSXTW64    : ZPRExtendRegisterOperand<0b1, 0b0, "SXTW", RegWidth, 64>;

  // LSL(8|16|32|64)
  def ZPR#RegWidth#AsmOpndExtLSL8  : ZPRExtendAsmOperand<"LSL", RegWidth, 8>;
  def ZPR#RegWidth#AsmOpndExtLSL16 : ZPRExtendAsmOperand<"LSL", RegWidth, 16>;
  def ZPR#RegWidth#AsmOpndExtLSL32 : ZPRExtendAsmOperand<"LSL", RegWidth, 32>;
  def ZPR#RegWidth#AsmOpndExtLSL64 : ZPRExtendAsmOperand<"LSL", RegWidth, 64>;
  def ZPR#RegWidth#ExtLSL8  : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 8>;
  def ZPR#RegWidth#ExtLSL16 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 16>;
  def ZPR#RegWidth#ExtLSL32 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 32>;
  def ZPR#RegWidth#ExtLSL64 : ZPRExtendRegisterOperand<0b0, 0b1, "LSL", RegWidth, 64>;
}

// Asm operand class for a 64-bit GPR used as a shifted offset with the given
// scale; parsed by tryParseGPROperand<true>.
class GPR64ShiftExtendAsmOperand <string AsmOperandName, int Scale, string RegClass> : AsmOperandClass {
  let Name = AsmOperandName # Scale;
  let PredicateMethod = "isGPR64WithShiftExtend<AArch64::"#RegClass#"RegClassID, " # Scale # ">";
  let DiagnosticType = "Invalid" # AsmOperandName # Scale;
  let RenderMethod = "addRegOperands";
  let ParserMethod = "tryParseGPROperand<true>";
}

// Pairs a named GPR64ShiftExtendAsmOperand with the matching printer.
class GPR64ExtendRegisterOperand<string Name, int Scale, RegisterClass RegClass> : RegisterOperand<RegClass>{
  let ParserMatchClass = !cast<AsmOperandClass>(Name);
  let PrintMethod = "printRegWithShiftExtend<false, " # Scale # ", 'x', 0>";
}

// Instantiate the GPR64 shifted-offset operands for each scale; the NoXZR
// variants use the GPR64common register class.
foreach Scale = [8, 16, 32, 64] in {
  def GPR64shiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64shifted", Scale, "GPR64">;
  def GPR64shifted # Scale : GPR64ExtendRegisterOperand<"GPR64shiftedAsmOpnd" # Scale, Scale, GPR64>;

  def GPR64NoXZRshiftedAsmOpnd # Scale : GPR64ShiftExtendAsmOperand<"GPR64NoXZRshifted", Scale, "GPR64common">;
  def GPR64NoXZRshifted # Scale : GPR64ExtendRegisterOperand<"GPR64NoXZRshiftedAsmOpnd" # Scale, Scale, GPR64common>;
}