//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for AArch64 architecture.
//
//===----------------------------------------------------------------------===//

/// CCIfAlign - Match only if the original (pre-legalization) alignment of the
/// argument equals Align.
class CCIfAlign<string Align, CCAction A> :
  CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
/// CCIfBigEndian - Match only if we're in big endian mode.
class CCIfBigEndian<CCAction A> :
  CCIf<"State.getMachineFunction().getSubtarget().getDataLayout()->isBigEndian()", A>;

//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//

def CC_AArch64_AAPCS : CallingConv<[
  // Vectors of f32/f64 are handled as the equivalently-sized integer vectors.
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Arguments flagged as needing consecutive registers are dispatched to a
  // custom C++ handler.
  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  // The W and X lists shadow each other: assigning a Wn also marks Xn as used
  // (and vice versa below), so each GPR argument consumes exactly one of the
  // eight slots regardless of width.
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // i128 is split to two i64s, we can't fit half to register X7.
  // Only even registers may hold the low half, so the candidate list is
  // [X0, X2, X4, X6] and the shadow list marks the skipped odd registers.
  CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
                                                    [X0, X1, X3, X5]>>>,

  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  // Shadowing X7 ensures a stack-passed i128 also exhausts the last GPR slot.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  // H/S/D registers shadow the corresponding Q register, so each FP/SIMD
  // argument consumes one of the eight V-register slots whatever its size.
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  // AAPCS stack slots are a minimum of 8 bytes, even for sub-word types.
  CCIfType<[i1, i8, i16, f16], CCAssignToStack<8, 8>>,
  CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

// Return values use the same register assignments as arguments; there is no
// stack fallback since returns that don't fit in registers go via SRet/X8.
def RetCC_AArch64_AAPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;


// Darwin uses a calling convention which differs in only two ways
// from the standard one at this level:
//     + i128s (i.e. split i64s) don't need even registers.
//     + Stack slots are sized as needed rather than being at least 64-bit.
def CC_AArch64_DarwinPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // i128 is split to two i64s, we can't fit half to register X7.
  // Unlike AAPCS, any register (except X7) may start an i128, so the list is
  // contiguous rather than even-only.
  CCIfType<[i64],
           CCIfSplit<CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6],
                                             [W0, W1, W2, W3, W4, W5, W6]>>>,
  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  // These check ValVT (the pre-promotion type) because i1/i8/i16 were promoted
  // to i32 above, so a CCIfType on the promoted LocVT could not distinguish
  // them; Darwin sizes stack slots to the original type.
  CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
  CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16", CCAssignToStack<2, 2>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

// Darwin variadic arguments: anonymous arguments never go in registers; every
// scalar is widened to i64/f64 and placed on the stack.
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Stack_Block">>,

  // Handle all scalar types as either i64 or f64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  CCIfType<[f16, f32],     CCPromoteToType<f64>>,

  // Everything is on the stack.
  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

// The WebKit_JS calling convention only passes the first argument (the callee)
// in register and the remaining arguments on stack. We allow 32bit stack slots,
// so that WebKit can write partial values in the stack and define the other
// 32bit quantity as undef.
def CC_AArch64_WebKit_JS : CallingConv<[
  // Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0], [X0]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0], [W0]>>,

  // Pass the remaining arguments on the stack instead.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;

// WebKit_JS returns, by contrast, may use all eight GPR/FPR slots.
def RetCC_AArch64_WebKit_JS : CallingConv<[
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;

//===----------------------------------------------------------------------===//
// ARM64 Calling Convention for GHC
//===----------------------------------------------------------------------===//

// This calling convention is specific to the Glasgow Haskell Compiler.
// The only documentation is the GHC source code, specifically the C header
// file:
//
//     https://github.com/ghc/ghc/blob/master/includes/stg/MachRegs.h
//
// which defines the registers for the Spineless Tagless G-Machine (STG) that
// GHC uses to implement lazy evaluation. The generic STG machine has a set of
// registers which are mapped to appropriate set of architecture specific
// registers for each CPU architecture.
//
// The STG Machine is documented here:
//
//    https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/GeneratedCode
//
// The AArch64 register mapping is under the heading "The ARMv8/AArch64 ABI
// register mapping".

def CC_AArch64_GHC : CallingConv<[
  // Handle all vector types as either f64 or v2f64.
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, f128], CCBitConvertToType<v2f64>>,

  CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
  CCIfType<[f32], CCAssignToReg<[S8, S9, S10, S11]>>,
  CCIfType<[f64], CCAssignToReg<[D12, D13, D14, D15]>>,

  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64], CCAssignToReg<[X19, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
]>;

// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
// presumably a callee to someone. External functions may not do so, but this
// is currently safe since BL has LR as an implicit-def and what happens after a
// tail call doesn't matter.
//
// It would be better to model its preservation semantics properly (create a
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                           X23, X24, X25, X26, X27, X28,
                                           D8,  D9,  D10, D11,
                                           D12, D13, D14, D15)>;

// Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
// 'this' and the pointer return value are both passed in X0 in these cases,
// this can be partially modelled by treating X0 as a callee-saved register;
// only the resulting RegMask is used; the SaveList is ignored
//
// (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case)
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;

// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. x16 and x17 are used on the
// fast path for calculation, but other registers except X0 (argument/return)
// and LR (it is a call, after all) are preserved.
def CSR_AArch64_TLS_Darwin
    : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
                           FP,
                           (sequence "Q%u", 0, 31))>;

// The ELF stub used for TLS-descriptor access saves every feasible
// register. Only X0 and LR are clobbered.
def CSR_AArch64_TLS_ELF
    : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
                           (sequence "Q%u", 0, 31))>;

// Every register (both 32- and 64-bit views, plus all FP/SIMD widths) is
// treated as callee-saved.
def CSR_AArch64_AllRegs
    : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
                           (sequence "X%u", 0, 28), FP, LR, SP,
                           (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
                           (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
                           (sequence "Q%u", 0, 31))>;

// No registers are callee-saved.
def CSR_AArch64_NoRegs : CalleeSavedRegs<(add)>;