/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2021 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */


#ifndef GCC_AARCH64_H
#define GCC_AARCH64_H

/* Target CPU builtins.  */
#define TARGET_CPU_CPP_BUILTINS() \
  aarch64_cpu_cpp_builtins (pfile)

/* Target hooks for D language.  */
#define TARGET_D_CPU_VERSIONS aarch64_d_target_versions
#define TARGET_D_REGISTER_CPU_TARGET_INFO aarch64_d_register_target_info


#define REGISTER_TARGET_PRAGMAS() aarch64_register_pragmas ()

/* Target machine storage layout.  */

#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
  if (GET_MODE_CLASS (MODE) == MODE_INT     \
      && GET_MODE_SIZE (MODE) < 4)          \
    {                                       \
      if (MODE == QImode || MODE == HImode) \
	{                                   \
	  MODE = SImode;                    \
	}                                   \
    }

/* Bits are always numbered from the LSBit.  */
#define BITS_BIG_ENDIAN 0

/* Big/little-endian flavour.  */
#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)

/* AdvSIMD is supported in the default configuration, unless disabled by
   -mgeneral-regs-only or by the +nosimd extension.  */
#define TARGET_SIMD (!TARGET_GENERAL_REGS_ONLY && AARCH64_ISA_SIMD)
#define TARGET_FLOAT (!TARGET_GENERAL_REGS_ONLY && AARCH64_ISA_FP)

#define UNITS_PER_WORD		8

#define UNITS_PER_VREG		16

#define PARM_BOUNDARY		64

#define STACK_BOUNDARY		128

#define FUNCTION_BOUNDARY	32

#define EMPTY_FIELD_BOUNDARY	32

#define BIGGEST_ALIGNMENT	128

#define SHORT_TYPE_SIZE		16

#define INT_TYPE_SIZE		32

#define LONG_TYPE_SIZE		(TARGET_ILP32 ? 32 : 64)

#define POINTER_SIZE		(TARGET_ILP32 ? 32 : 64)

#define LONG_LONG_TYPE_SIZE	64

#define FLOAT_TYPE_SIZE		32

#define DOUBLE_TYPE_SIZE	64

#define LONG_DOUBLE_TYPE_SIZE	128

/* This value is the number of bytes a caller is allowed to drop the stack
   by before probing has to be done for stack clash protection.  */
#define STACK_CLASH_CALLER_GUARD 1024

/* This value represents the minimum number of bytes we expect the function's
   outgoing arguments to be when stack-clash is enabled.  */
#define STACK_CLASH_MIN_BYTES_OUTGOING_ARGS 8

/* This value controls how many pages we manually unroll the loop for when
   generating stack clash probes.  */
#define STACK_CLASH_MAX_UNROLL_PAGES 4

/* The architecture reserves all bits of the address for hardware use,
   so the vbit must go into the delta field of pointers to member
   functions.  This is the same config as that in the AArch32
   port.  */
#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta


/* Emit calls to libgcc helpers for atomic operations for runtime detection
   of LSE instructions.  */
#define TARGET_OUTLINE_ATOMICS (aarch64_flag_outline_atomics)
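/* Illustrative note (not part of the original header): with outline
   atomics enabled, a sequence such as

     int f (int *p) { return __atomic_fetch_add (p, 1, __ATOMIC_SEQ_CST); }

   compiles to a call to a libgcc helper (presumably
   __aarch64_ldadd4_acq_rel here) that selects an LSE instruction or an
   LL/SC loop at run time, instead of committing to one form statically.  */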
/* Align definitions of arrays, unions and structures so that
   initializations and copies can be made more efficient.  This is not
   ABI-changing, so it only affects places where we can see the
   definition.  Increasing the alignment tends to introduce padding,
   so don't do this when optimizing for size/conserving stack space.  */
#define AARCH64_EXPAND_ALIGNMENT(COND, EXP, ALIGN)		\
  (((COND) && ((ALIGN) < BITS_PER_WORD)				\
    && (TREE_CODE (EXP) == ARRAY_TYPE				\
	|| TREE_CODE (EXP) == UNION_TYPE			\
	|| TREE_CODE (EXP) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))

/* Align global data.  */
#define DATA_ALIGNMENT(EXP, ALIGN)			\
  AARCH64_EXPAND_ALIGNMENT (!optimize_size, EXP, ALIGN)

/* Similarly, make sure that objects on the stack are sensibly aligned.  */
#define LOCAL_ALIGNMENT(EXP, ALIGN)				\
  AARCH64_EXPAND_ALIGNMENT (!flag_conserve_stack, EXP, ALIGN)
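/* Illustrative example of the effect (an added note, not original text):
   a global

     char buf[6];

   would normally be byte-aligned, but DATA_ALIGNMENT raises it to
   BITS_PER_WORD (64 bits) when not optimizing for size, so block
   initializations and copies can use full-word accesses; scalars keep
   their natural alignment.  */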
#define STRUCTURE_SIZE_BOUNDARY		8

/* Heap alignment (same as BIGGEST_ALIGNMENT and STACK_BOUNDARY).  */
#define MALLOC_ABI_ALIGNMENT	128

/* Defined by the ABI */
#define WCHAR_TYPE "unsigned int"
#define WCHAR_TYPE_SIZE		32

/* Using long long breaks -ansi and -std=c90, so these will need to be
   made conditional for an LLP64 ABI.  */

#define SIZE_TYPE	"long unsigned int"

#define PTRDIFF_TYPE	"long int"

#define PCC_BITFIELD_TYPE_MATTERS	1

/* Major revision number of the ARM Architecture implemented by the
   target.  */
extern unsigned aarch64_architecture_version;

/* Instruction tuning/selection flags.  */

/* Bit values used to identify processor capabilities.  */
#define AARCH64_FL_SIMD       (1 << 0)	/* Has SIMD instructions.  */
#define AARCH64_FL_FP         (1 << 1)	/* Has FP.  */
#define AARCH64_FL_CRYPTO     (1 << 2)	/* Has crypto.  */
#define AARCH64_FL_CRC        (1 << 3)	/* Has CRC.  */
/* ARMv8.1-A architecture extensions.  */
#define AARCH64_FL_LSE        (1 << 4)	/* Has Large System Extensions.  */
#define AARCH64_FL_RDMA       (1 << 5)	/* Has Round Double Multiply Add.  */
#define AARCH64_FL_V8_1       (1 << 6)	/* Has ARMv8.1-A extensions.  */
/* Armv8-R.  */
#define AARCH64_FL_V8_R       (1 << 7)	/* Armv8-R AArch64.  */
/* ARMv8.2-A architecture extensions.  */
#define AARCH64_FL_V8_2       (1 << 8)	/* Has ARMv8.2-A features.  */
#define AARCH64_FL_F16        (1 << 9)	/* Has ARMv8.2-A FP16 extensions.  */
#define AARCH64_FL_SVE        (1 << 10) /* Has Scalable Vector Extensions.  */
/* ARMv8.3-A architecture extensions.  */
#define AARCH64_FL_V8_3       (1 << 11) /* Has ARMv8.3-A features.  */
#define AARCH64_FL_RCPC       (1 << 12) /* Has support for RCpc model.  */
#define AARCH64_FL_DOTPROD    (1 << 13) /* Has ARMv8.2-A Dot Product ins.  */
/* New flags to split crypto into aes and sha2.  */
#define AARCH64_FL_AES        (1 << 14) /* Has Crypto AES.  */
#define AARCH64_FL_SHA2       (1 << 15) /* Has Crypto SHA2.  */
/* ARMv8.4-A architecture extensions.  */
#define AARCH64_FL_V8_4       (1 << 16) /* Has ARMv8.4-A features.  */
#define AARCH64_FL_SM4        (1 << 17) /* Has ARMv8.4-A SM3 and SM4.  */
#define AARCH64_FL_SHA3       (1 << 18) /* Has ARMv8.4-A SHA3 and SHA512.  */
#define AARCH64_FL_F16FML     (1 << 19) /* Has ARMv8.4-A FP16 extensions.  */
#define AARCH64_FL_RCPC8_4    (1 << 20) /* Has ARMv8.4-A RCPC extensions.  */

/* Statistical Profiling extensions.  */
#define AARCH64_FL_PROFILE    (1 << 21)

/* ARMv8.5-A architecture extensions.  */
#define AARCH64_FL_V8_5       (1 << 22) /* Has ARMv8.5-A features.  */
#define AARCH64_FL_RNG        (1 << 23) /* ARMv8.5-A Random Number Insns.  */
#define AARCH64_FL_MEMTAG     (1 << 24) /* ARMv8.5-A Memory Tagging
					   Extensions.  */

/* Speculation Barrier instruction supported.  */
#define AARCH64_FL_SB         (1 << 25)

/* Speculative Store Bypass Safe instruction supported.  */
#define AARCH64_FL_SSBS       (1 << 26)

/* Execution and Data Prediction Restriction instructions supported.  */
#define AARCH64_FL_PREDRES    (1 << 27)

/* SVE2 instruction supported.  */
#define AARCH64_FL_SVE2         (1 << 28)
#define AARCH64_FL_SVE2_AES     (1 << 29)
#define AARCH64_FL_SVE2_SM4     (1 << 30)
#define AARCH64_FL_SVE2_SHA3    (1ULL << 31)
#define AARCH64_FL_SVE2_BITPERM (1ULL << 32)

/* Transactional Memory Extension.  */
#define AARCH64_FL_TME        (1ULL << 33)  /* Has TME instructions.  */

/* Armv8.6-A architecture extensions.  */
#define AARCH64_FL_V8_6       (1ULL << 34)

/* 8-bit Integer Matrix Multiply (I8MM) extensions.  */
#define AARCH64_FL_I8MM       (1ULL << 35)

/* Brain half-precision floating-point (BFloat16) Extension.  */
#define AARCH64_FL_BF16       (1ULL << 36)

/* 32-bit Floating-point Matrix Multiply (F32MM) extensions.  */
#define AARCH64_FL_F32MM      (1ULL << 37)

/* 64-bit Floating-point Matrix Multiply (F64MM) extensions.  */
#define AARCH64_FL_F64MM      (1ULL << 38)

/* Flag Manipulation Instructions (FLAGM) extension.  */
#define AARCH64_FL_FLAGM      (1ULL << 39)

/* Pointer Authentication (PAUTH) extension.  */
#define AARCH64_FL_PAUTH      (1ULL << 40)

/* Has FP and SIMD.  */
#define AARCH64_FL_FPSIMD     (AARCH64_FL_FP | AARCH64_FL_SIMD)

/* Has FP without SIMD.  */
#define AARCH64_FL_FPQ16      (AARCH64_FL_FP & ~AARCH64_FL_SIMD)

/* Architecture flags that affect instruction selection.  */
#define AARCH64_FL_FOR_ARCH8       (AARCH64_FL_FPSIMD)
#define AARCH64_FL_FOR_ARCH8_1				\
  (AARCH64_FL_FOR_ARCH8 | AARCH64_FL_LSE | AARCH64_FL_CRC \
   | AARCH64_FL_RDMA | AARCH64_FL_V8_1)
#define AARCH64_FL_FOR_ARCH8_2				\
  (AARCH64_FL_FOR_ARCH8_1 | AARCH64_FL_V8_2)
#define AARCH64_FL_FOR_ARCH8_3				\
  (AARCH64_FL_FOR_ARCH8_2 | AARCH64_FL_V8_3 | AARCH64_FL_PAUTH)
#define AARCH64_FL_FOR_ARCH8_4				\
  (AARCH64_FL_FOR_ARCH8_3 | AARCH64_FL_V8_4 | AARCH64_FL_F16FML \
   | AARCH64_FL_DOTPROD | AARCH64_FL_RCPC8_4 | AARCH64_FL_FLAGM)
#define AARCH64_FL_FOR_ARCH8_5				\
  (AARCH64_FL_FOR_ARCH8_4 | AARCH64_FL_V8_5		\
   | AARCH64_FL_SB | AARCH64_FL_SSBS | AARCH64_FL_PREDRES)
#define AARCH64_FL_FOR_ARCH8_6				\
  (AARCH64_FL_FOR_ARCH8_5 | AARCH64_FL_V8_6 | AARCH64_FL_FPSIMD \
   | AARCH64_FL_I8MM | AARCH64_FL_BF16)
#define AARCH64_FL_FOR_ARCH8_R				\
  (AARCH64_FL_FOR_ARCH8_4 | AARCH64_FL_V8_R)
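/* Worked example of the nesting above (added for illustration):

     AARCH64_FL_FOR_ARCH8_2
       == AARCH64_FL_FPSIMD | AARCH64_FL_LSE | AARCH64_FL_CRC
	  | AARCH64_FL_RDMA | AARCH64_FL_V8_1 | AARCH64_FL_V8_2

   Each level is a superset of the previous one, so testing a single
   AARCH64_FL_V8_N bit implies all earlier mandatory features.  */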
/* Macros to test ISA flags.  */

#define AARCH64_ISA_CRC           (aarch64_isa_flags & AARCH64_FL_CRC)
#define AARCH64_ISA_CRYPTO        (aarch64_isa_flags & AARCH64_FL_CRYPTO)
#define AARCH64_ISA_FP            (aarch64_isa_flags & AARCH64_FL_FP)
#define AARCH64_ISA_SIMD          (aarch64_isa_flags & AARCH64_FL_SIMD)
#define AARCH64_ISA_LSE           (aarch64_isa_flags & AARCH64_FL_LSE)
#define AARCH64_ISA_RDMA          (aarch64_isa_flags & AARCH64_FL_RDMA)
#define AARCH64_ISA_V8_2          (aarch64_isa_flags & AARCH64_FL_V8_2)
#define AARCH64_ISA_F16           (aarch64_isa_flags & AARCH64_FL_F16)
#define AARCH64_ISA_SVE           (aarch64_isa_flags & AARCH64_FL_SVE)
#define AARCH64_ISA_SVE2          (aarch64_isa_flags & AARCH64_FL_SVE2)
#define AARCH64_ISA_SVE2_AES      (aarch64_isa_flags & AARCH64_FL_SVE2_AES)
#define AARCH64_ISA_SVE2_BITPERM  (aarch64_isa_flags & AARCH64_FL_SVE2_BITPERM)
#define AARCH64_ISA_SVE2_SHA3     (aarch64_isa_flags & AARCH64_FL_SVE2_SHA3)
#define AARCH64_ISA_SVE2_SM4      (aarch64_isa_flags & AARCH64_FL_SVE2_SM4)
#define AARCH64_ISA_V8_3          (aarch64_isa_flags & AARCH64_FL_V8_3)
#define AARCH64_ISA_DOTPROD       (aarch64_isa_flags & AARCH64_FL_DOTPROD)
#define AARCH64_ISA_AES           (aarch64_isa_flags & AARCH64_FL_AES)
#define AARCH64_ISA_SHA2          (aarch64_isa_flags & AARCH64_FL_SHA2)
#define AARCH64_ISA_V8_4          (aarch64_isa_flags & AARCH64_FL_V8_4)
#define AARCH64_ISA_SM4           (aarch64_isa_flags & AARCH64_FL_SM4)
#define AARCH64_ISA_SHA3          (aarch64_isa_flags & AARCH64_FL_SHA3)
#define AARCH64_ISA_F16FML        (aarch64_isa_flags & AARCH64_FL_F16FML)
#define AARCH64_ISA_RCPC8_4       (aarch64_isa_flags & AARCH64_FL_RCPC8_4)
#define AARCH64_ISA_RNG           (aarch64_isa_flags & AARCH64_FL_RNG)
#define AARCH64_ISA_V8_5          (aarch64_isa_flags & AARCH64_FL_V8_5)
#define AARCH64_ISA_TME           (aarch64_isa_flags & AARCH64_FL_TME)
#define AARCH64_ISA_MEMTAG        (aarch64_isa_flags & AARCH64_FL_MEMTAG)
#define AARCH64_ISA_V8_6          (aarch64_isa_flags & AARCH64_FL_V8_6)
#define AARCH64_ISA_I8MM          (aarch64_isa_flags & AARCH64_FL_I8MM)
#define AARCH64_ISA_F32MM         (aarch64_isa_flags & AARCH64_FL_F32MM)
#define AARCH64_ISA_F64MM         (aarch64_isa_flags & AARCH64_FL_F64MM)
#define AARCH64_ISA_BF16          (aarch64_isa_flags & AARCH64_FL_BF16)
#define AARCH64_ISA_SB            (aarch64_isa_flags & AARCH64_FL_SB)
#define AARCH64_ISA_V8_R          (aarch64_isa_flags & AARCH64_FL_V8_R)
#define AARCH64_ISA_PAUTH         (aarch64_isa_flags & AARCH64_FL_PAUTH)

/* Crypto is an optional extension to AdvSIMD.  */
#define TARGET_CRYPTO (TARGET_SIMD && AARCH64_ISA_CRYPTO)

/* SHA2 is an optional extension to AdvSIMD.  */
#define TARGET_SHA2 ((TARGET_SIMD && AARCH64_ISA_SHA2) || TARGET_CRYPTO)

/* SHA3 is an optional extension to AdvSIMD.  */
#define TARGET_SHA3 (TARGET_SIMD && AARCH64_ISA_SHA3)

/* AES is an optional extension to AdvSIMD.  */
#define TARGET_AES ((TARGET_SIMD && AARCH64_ISA_AES) || TARGET_CRYPTO)

/* SM is an optional extension to AdvSIMD.  */
#define TARGET_SM4 (TARGET_SIMD && AARCH64_ISA_SM4)

/* FP16FML is an optional extension to AdvSIMD.  */
#define TARGET_F16FML (TARGET_SIMD && AARCH64_ISA_F16FML && TARGET_FP_F16INST)

/* CRC instructions that can be enabled through +crc arch extension.  */
#define TARGET_CRC32 (AARCH64_ISA_CRC)

/* Atomic instructions that can be enabled through the +lse extension.  */
#define TARGET_LSE (AARCH64_ISA_LSE)
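/* Sketch of how these wrappers are used (the pattern below is a
   hypothetical illustration, not one from the machine description):

     (define_insn "atomic_cas_example"
       [...]
       "TARGET_LSE"
       "casal\t%w0, %w2, %1")

   Insn conditions test TARGET_* rather than AARCH64_ISA_* directly, so
   prerequisites such as -mgeneral-regs-only are honoured in one place.  */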
/* ARMv8.2-A FP16 support that can be enabled through the +fp16 extension.  */
#define TARGET_FP_F16INST (TARGET_FLOAT && AARCH64_ISA_F16)
#define TARGET_SIMD_F16INST (TARGET_SIMD && AARCH64_ISA_F16)

/* Dot Product is an optional extension to AdvSIMD enabled through +dotprod.  */
#define TARGET_DOTPROD (TARGET_SIMD && AARCH64_ISA_DOTPROD)

/* SVE instructions, enabled through +sve.  */
#define TARGET_SVE (!TARGET_GENERAL_REGS_ONLY && AARCH64_ISA_SVE)

/* SVE2 instructions, enabled through +sve2.  */
#define TARGET_SVE2 (TARGET_SVE && AARCH64_ISA_SVE2)

/* SVE2 AES instructions, enabled through +sve2-aes.  */
#define TARGET_SVE2_AES (TARGET_SVE2 && AARCH64_ISA_SVE2_AES)

/* SVE2 BITPERM instructions, enabled through +sve2-bitperm.  */
#define TARGET_SVE2_BITPERM (TARGET_SVE2 && AARCH64_ISA_SVE2_BITPERM)

/* SVE2 SHA3 instructions, enabled through +sve2-sha3.  */
#define TARGET_SVE2_SHA3 (TARGET_SVE2 && AARCH64_ISA_SVE2_SHA3)

/* SVE2 SM4 instructions, enabled through +sve2-sm4.  */
#define TARGET_SVE2_SM4 (TARGET_SVE2 && AARCH64_ISA_SVE2_SM4)

/* ARMv8.3-A features.  */
#define TARGET_ARMV8_3	(AARCH64_ISA_V8_3)

/* JavaScript conversion instruction from Armv8.3-A.  */
#define TARGET_JSCVT	(TARGET_FLOAT && AARCH64_ISA_V8_3)

/* Armv8.3-A Complex number extension to AdvSIMD extensions.  */
#define TARGET_COMPLEX (TARGET_SIMD && TARGET_ARMV8_3)

/* Floating-point rounding instructions from Armv8.5-A.  */
#define TARGET_FRINT (AARCH64_ISA_V8_5 && TARGET_FLOAT)

/* TME instructions are enabled.  */
#define TARGET_TME (AARCH64_ISA_TME)

/* Random number instructions from Armv8.5-A.  */
#define TARGET_RNG (AARCH64_ISA_RNG)

/* Memory Tagging instructions, optional to Armv8.5-A, enabled through
   +memtag.  */
#define TARGET_MEMTAG (AARCH64_ISA_V8_5 && AARCH64_ISA_MEMTAG)

/* I8MM instructions are enabled through +i8mm.  */
#define TARGET_I8MM (AARCH64_ISA_I8MM)
#define TARGET_SVE_I8MM (TARGET_SVE && AARCH64_ISA_I8MM)

/* F32MM instructions are enabled through +f32mm.  */
#define TARGET_F32MM (AARCH64_ISA_F32MM)
#define TARGET_SVE_F32MM (TARGET_SVE && AARCH64_ISA_F32MM)

/* F64MM instructions are enabled through +f64mm.  */
#define TARGET_F64MM (AARCH64_ISA_F64MM)
#define TARGET_SVE_F64MM (TARGET_SVE && AARCH64_ISA_F64MM)

/* BF16 instructions are enabled through +bf16.  */
#define TARGET_BF16_FP (AARCH64_ISA_BF16)
#define TARGET_BF16_SIMD (AARCH64_ISA_BF16 && TARGET_SIMD)
#define TARGET_SVE_BF16 (TARGET_SVE && AARCH64_ISA_BF16)

/* PAUTH instructions are enabled through +pauth.  */
#define TARGET_PAUTH (AARCH64_ISA_PAUTH)

/* Make sure this is always defined so we don't have to check for ifdefs
   but rather use normal ifs.  */
#ifndef TARGET_FIX_ERR_A53_835769_DEFAULT
#define TARGET_FIX_ERR_A53_835769_DEFAULT 0
#else
#undef TARGET_FIX_ERR_A53_835769_DEFAULT
#define TARGET_FIX_ERR_A53_835769_DEFAULT 1
#endif

/* SB instruction is enabled through +sb.  */
#define TARGET_SB (AARCH64_ISA_SB)

/* Apply the workaround for Cortex-A53 erratum 835769.  */
#define TARGET_FIX_ERR_A53_835769	\
  ((aarch64_fix_a53_err835769 == 2)	\
  ? TARGET_FIX_ERR_A53_835769_DEFAULT : aarch64_fix_a53_err835769)
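/* Worked example of the "== 2" test (added note): the option variable is
   0 or 1 when -mno-fix-cortex-a53-835769 or -mfix-cortex-a53-835769 was
   given explicitly, and keeps its "unset" initial value of 2 otherwise,
   in which case the configure-time default applies.  */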
/* Make sure this is always defined so we don't have to check for ifdefs
   but rather use normal ifs.  */
#ifndef TARGET_FIX_ERR_A53_843419_DEFAULT
#define TARGET_FIX_ERR_A53_843419_DEFAULT 0
#else
#undef TARGET_FIX_ERR_A53_843419_DEFAULT
#define TARGET_FIX_ERR_A53_843419_DEFAULT 1
#endif

/* Apply the workaround for Cortex-A53 erratum 843419.  */
#define TARGET_FIX_ERR_A53_843419	\
  ((aarch64_fix_a53_err843419 == 2)	\
  ? TARGET_FIX_ERR_A53_843419_DEFAULT : aarch64_fix_a53_err843419)

/* ARMv8.1-A Adv.SIMD support.  */
#define TARGET_SIMD_RDMA (TARGET_SIMD && AARCH64_ISA_RDMA)

/* Standard register usage.  */

/* 31 64-bit general purpose registers R0-R30:
   R30		LR (link register)
   R29		FP (frame pointer)
   R19-R28	Callee-saved registers
   R18		The platform register; use as temporary register.
   R17		IP1 The second intra-procedure-call temporary register
		(can be used by call veneers and PLT code); otherwise use
		as a temporary register
   R16		IP0 The first intra-procedure-call temporary register (can
		be used by call veneers and PLT code); otherwise use as a
		temporary register
   R9-R15	Temporary registers
   R8		Structure value parameter / temporary register
   R0-R7	Parameter/result registers

   SP		stack pointer, encoded as X/R31 where permitted.
   ZR		zero register, encoded as X/R31 elsewhere

   32 x 128-bit floating-point/vector registers
   V16-V31	Caller-saved (temporary) registers
   V8-V15	Callee-saved registers
   V0-V7	Parameter/result registers

   The vector register V0 holds scalar B0, H0, S0 and D0 in its least
   significant bits.  Unlike AArch32, S1 is not packed into D0, etc.

   P0-P7	Predicate low registers: valid in all predicate contexts
   P8-P15	Predicate high registers: used as scratch space

   FFR		First Fault Register, a fixed-use SVE predicate register
   FFRT		FFR token: a fake register used for modelling dependencies

   VG		Pseudo "vector granules" register

   VG is the number of 64-bit elements in an SVE vector.  We define
   it as a hard register so that we can easily map it to the DWARF VG
   register.  GCC internally uses the poly_int variable aarch64_sve_vg
   instead.  */

#define FIXED_REGISTERS				\
  {						\
    0, 0, 0, 0,   0, 0, 0, 0,	/* R0 - R7 */	\
    0, 0, 0, 0,   0, 0, 0, 0,	/* R8 - R15 */	\
    0, 0, 0, 0,   0, 0, 0, 0,	/* R16 - R23 */	\
    0, 0, 0, 0,   0, 1, 0, 1,	/* R24 - R30, SP */ \
    0, 0, 0, 0,   0, 0, 0, 0,	/* V0 - V7 */	\
    0, 0, 0, 0,   0, 0, 0, 0,	/* V8 - V15 */	\
    0, 0, 0, 0,   0, 0, 0, 0,	/* V16 - V23 */	\
    0, 0, 0, 0,   0, 0, 0, 0,	/* V24 - V31 */	\
    1, 1, 1, 1,			/* SFP, AP, CC, VG */ \
    0, 0, 0, 0,   0, 0, 0, 0,	/* P0 - P7 */	\
    0, 0, 0, 0,   0, 0, 0, 0,	/* P8 - P15 */	\
    1, 1			/* FFR and FFRT */ \
  }
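/* How to read the table above (added note): a 1 marks a register that
   the allocator must never use.  In the "R24 - R30, SP" row the 1s are
   R29 (the frame pointer) and SP; VG, FFR and FFRT are fixed because
   they model SVE state rather than allocatable storage.  */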
/* X30 is marked as caller-saved which is in line with regular function call
   behavior since the call instructions clobber it; AARCH64_EXPAND_CALL does
   that for regular function calls and avoids it for sibcalls.  X30 is
   considered live for sibcalls; EPILOGUE_USES helps achieve that by returning
   true but not until function epilogues have been generated.  This ensures
   that X30 is available for use in leaf functions if needed.  */

#define CALL_USED_REGISTERS			\
  {						\
    1, 1, 1, 1,   1, 1, 1, 1,	/* R0 - R7 */	\
    1, 1, 1, 1,   1, 1, 1, 1,	/* R8 - R15 */	\
    1, 1, 1, 0,   0, 0, 0, 0,	/* R16 - R23 */	\
    0, 0, 0, 0,   0, 1, 1, 1,	/* R24 - R30, SP */ \
    1, 1, 1, 1,   1, 1, 1, 1,	/* V0 - V7 */	\
    0, 0, 0, 0,   0, 0, 0, 0,	/* V8 - V15 */	\
    1, 1, 1, 1,   1, 1, 1, 1,	/* V16 - V23 */	\
    1, 1, 1, 1,   1, 1, 1, 1,	/* V24 - V31 */	\
    1, 1, 1, 1,			/* SFP, AP, CC, VG */ \
    1, 1, 1, 1,   1, 1, 1, 1,	/* P0 - P7 */	\
    1, 1, 1, 1,   1, 1, 1, 1,	/* P8 - P15 */	\
    1, 1			/* FFR and FFRT */ \
  }

#define REGISTER_NAMES						\
  {								\
    "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",	\
    "x8",  "x9",  "x10", "x11", "x12", "x13", "x14", "x15",	\
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",	\
    "x24", "x25", "x26", "x27", "x28", "x29", "x30", "sp",	\
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",	\
    "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",	\
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",	\
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",	\
    "sfp", "ap",  "cc",  "vg",					\
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",	\
    "p8",  "p9",  "p10", "p11", "p12", "p13", "p14", "p15",	\
    "ffr", "ffrt"						\
  }

/* Generate the register aliases for core register N.  */
#define R_ALIASES(N) {"r" # N, R0_REGNUM + (N)}, \
		     {"w" # N, R0_REGNUM + (N)}

#define V_ALIASES(N) {"q" # N, V0_REGNUM + (N)}, \
		     {"d" # N, V0_REGNUM + (N)}, \
		     {"s" # N, V0_REGNUM + (N)}, \
		     {"h" # N, V0_REGNUM + (N)}, \
		     {"b" # N, V0_REGNUM + (N)}, \
		     {"z" # N, V0_REGNUM + (N)}

/* Provide aliases for all of the ISA-defined register name forms.
   These aliases are convenient for use in the clobber lists of inline
   asm statements.  */

#define ADDITIONAL_REGISTER_NAMES \
  { R_ALIASES(0),  R_ALIASES(1),  R_ALIASES(2),  R_ALIASES(3),  \
    R_ALIASES(4),  R_ALIASES(5),  R_ALIASES(6),  R_ALIASES(7),  \
    R_ALIASES(8),  R_ALIASES(9),  R_ALIASES(10), R_ALIASES(11), \
    R_ALIASES(12), R_ALIASES(13), R_ALIASES(14), R_ALIASES(15), \
    R_ALIASES(16), R_ALIASES(17), R_ALIASES(18), R_ALIASES(19), \
    R_ALIASES(20), R_ALIASES(21), R_ALIASES(22), R_ALIASES(23), \
    R_ALIASES(24), R_ALIASES(25), R_ALIASES(26), R_ALIASES(27), \
    R_ALIASES(28), R_ALIASES(29), R_ALIASES(30), {"wsp", R0_REGNUM + 31}, \
    V_ALIASES(0),  V_ALIASES(1),  V_ALIASES(2),  V_ALIASES(3),  \
    V_ALIASES(4),  V_ALIASES(5),  V_ALIASES(6),  V_ALIASES(7),  \
    V_ALIASES(8),  V_ALIASES(9),  V_ALIASES(10), V_ALIASES(11), \
    V_ALIASES(12), V_ALIASES(13), V_ALIASES(14), V_ALIASES(15), \
    V_ALIASES(16), V_ALIASES(17), V_ALIASES(18), V_ALIASES(19), \
    V_ALIASES(20), V_ALIASES(21), V_ALIASES(22), V_ALIASES(23), \
    V_ALIASES(24), V_ALIASES(25), V_ALIASES(26), V_ALIASES(27), \
    V_ALIASES(28), V_ALIASES(29), V_ALIASES(30), V_ALIASES(31)  \
  }

#define EPILOGUE_USES(REGNO) (aarch64_epilogue_uses (REGNO))

/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
   the stack pointer does not matter.  This is only true if the function
   uses alloca.  */
#define EXIT_IGNORE_STACK (cfun->calls_alloca)

#define STATIC_CHAIN_REGNUM		R18_REGNUM
#define HARD_FRAME_POINTER_REGNUM	R29_REGNUM
#define FRAME_POINTER_REGNUM		SFP_REGNUM
#define STACK_POINTER_REGNUM		SP_REGNUM
#define ARG_POINTER_REGNUM		AP_REGNUM
#define FIRST_PSEUDO_REGISTER		(FFRT_REGNUM + 1)
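/* Example use of the aliases in inline asm (added illustration):

     asm volatile ("mov w9, wzr" ::: "w9");       // "w9" names R9
     asm volatile ("movi v0.4s, #0" ::: "z0");    // "z0" aliases V0

   Any ISA spelling of a register (x9/w9, q0/d0/s0/h0/b0/z0) resolves to
   the same hard register in a clobber list.  */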
/* The number of argument registers available for each class.  */
#define NUM_ARG_REGS		8
#define NUM_FP_ARG_REGS		8
#define NUM_PR_ARG_REGS		4

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS		4

/* External DWARF register number scheme.  These numbers are used to
   identify registers in DWARF debug information; the values are
   defined by the AArch64 ABI.  The numbering scheme is independent of
   GCC's internal register numbering scheme.  */

#define AARCH64_DWARF_R0        0

/* The number of R registers, note 31! not 32.  */
#define AARCH64_DWARF_NUMBER_R 31

#define AARCH64_DWARF_SP       31
#define AARCH64_DWARF_VG       46
#define AARCH64_DWARF_P0       48
#define AARCH64_DWARF_V0       64

/* The number of V registers.  */
#define AARCH64_DWARF_NUMBER_V 32

/* For signal frames we need to use an alternative return column.  This
   value must not correspond to a hard register and must be out of the
   range of DWARF_FRAME_REGNUM().  */
#define DWARF_ALT_FRAME_RETURN_COLUMN   \
  (AARCH64_DWARF_V0 + AARCH64_DWARF_NUMBER_V)

/* We add 1 extra frame register for use as the
   DWARF_ALT_FRAME_RETURN_COLUMN.  */
#define DWARF_FRAME_REGISTERS           (DWARF_ALT_FRAME_RETURN_COLUMN + 1)


#define DBX_REGISTER_NUMBER(REGNO)	aarch64_dbx_register_number (REGNO)
/* Provide a definition of DWARF_FRAME_REGNUM here so that fallback unwinders
   can use DWARF_ALT_FRAME_RETURN_COLUMN defined above.  This is just the same
   as the default definition in dwarf2out.c.  */
#undef DWARF_FRAME_REGNUM
#define DWARF_FRAME_REGNUM(REGNO)	DBX_REGISTER_NUMBER (REGNO)

#define DWARF_FRAME_RETURN_COLUMN	DWARF_FRAME_REGNUM (LR_REGNUM)

#define DWARF2_UNWIND_INFO 1

/* Use R0 through R3 to pass exception handling information.  */
#define EH_RETURN_DATA_REGNO(N) \
  ((N) < 4 ? ((unsigned int) R0_REGNUM + (N)) : INVALID_REGNUM)

/* Select a format to encode pointers in exception handling data.  */
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
  aarch64_asm_preferred_eh_data_format ((CODE), (GLOBAL))

/* Output the assembly strings we want to add to a function definition.  */
#define ASM_DECLARE_FUNCTION_NAME(STR, NAME, DECL)	\
  aarch64_declare_function_name (STR, NAME, DECL)

/* Output assembly strings for alias definition.  */
#define ASM_OUTPUT_DEF_FROM_DECLS(STR, DECL, TARGET)	\
  aarch64_asm_output_alias (STR, DECL, TARGET)

/* Output assembly strings for undefined extern symbols.  */
#undef ASM_OUTPUT_EXTERNAL
#define ASM_OUTPUT_EXTERNAL(STR, DECL, NAME)	\
  aarch64_asm_output_external (STR, DECL, NAME)

/* Output assembly strings after .cfi_startproc is emitted.  */
#define ASM_POST_CFI_STARTPROC  aarch64_post_cfi_startproc

/* For EH returns X4 contains the stack adjustment.  */
#define EH_RETURN_STACKADJ_RTX	gen_rtx_REG (Pmode, R4_REGNUM)
#define EH_RETURN_HANDLER_RTX  aarch64_eh_return_handler_rtx ()

/* Don't use __builtin_setjmp until we've defined it.  */
#undef DONT_USE_BUILTIN_SETJMP
#define DONT_USE_BUILTIN_SETJMP 1

#undef TARGET_COMPUTE_FRAME_LAYOUT
#define TARGET_COMPUTE_FRAME_LAYOUT aarch64_layout_frame

/* Register in which the structure value is to be returned.  */
#define AARCH64_STRUCT_VALUE_REGNUM R8_REGNUM
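/* Worked example of the DWARF numbering (added note): X19 is DWARF
   register 19 (AARCH64_DWARF_R0 + 19), V2 is 66 (AARCH64_DWARF_V0 + 2)
   and P4 is 52 (AARCH64_DWARF_P0 + 4).  The alternative return column
   above therefore works out to 96, one past the last V register.  */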
/* Non-zero if REGNO is part of the Core register set.

   The rather unusual way of expressing this check is to avoid
   warnings when building the compiler when R0_REGNUM is 0 and REGNO
   is unsigned.  */
#define GP_REGNUM_P(REGNO)						\
  (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM))

/* Registers known to be preserved over a BL instruction.  This consists of the
   GENERAL_REGS without x16, x17, and x30.  The x30 register is changed by the
   BL instruction itself, while the x16 and x17 registers may be used by
   veneers which can be inserted by the linker.  */
#define STUB_REGNUM_P(REGNO) \
  (GP_REGNUM_P (REGNO) \
   && (REGNO) != R16_REGNUM \
   && (REGNO) != R17_REGNUM \
   && (REGNO) != R30_REGNUM)

#define FP_REGNUM_P(REGNO)						\
  (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM))

#define FP_LO_REGNUM_P(REGNO)						\
  (((unsigned) (REGNO - V0_REGNUM)) <= (V15_REGNUM - V0_REGNUM))

#define FP_LO8_REGNUM_P(REGNO)						\
  (((unsigned) (REGNO - V0_REGNUM)) <= (V7_REGNUM - V0_REGNUM))

#define PR_REGNUM_P(REGNO)						\
  (((unsigned) (REGNO - P0_REGNUM)) <= (P15_REGNUM - P0_REGNUM))

#define PR_LO_REGNUM_P(REGNO)						\
  (((unsigned) (REGNO - P0_REGNUM)) <= (P7_REGNUM - P0_REGNUM))

#define FP_SIMD_SAVED_REGNUM_P(REGNO)					\
  (((unsigned) (REGNO - V8_REGNUM)) <= (V23_REGNUM - V8_REGNUM))

/* Register and constant classes.  */

enum reg_class
{
  NO_REGS,
  TAILCALL_ADDR_REGS,
  STUB_REGS,
  GENERAL_REGS,
  STACK_REG,
  POINTER_REGS,
  FP_LO8_REGS,
  FP_LO_REGS,
  FP_REGS,
  POINTER_AND_FP_REGS,
  PR_LO_REGS,
  PR_HI_REGS,
  PR_REGS,
  FFR_REGS,
  PR_AND_FFR_REGS,
  ALL_REGS,
  LIM_REG_CLASSES		/* Last */
};

#define N_REG_CLASSES	((int) LIM_REG_CLASSES)

#define REG_CLASS_NAMES		\
{				\
  "NO_REGS",			\
  "TAILCALL_ADDR_REGS",		\
  "STUB_REGS",			\
  "GENERAL_REGS",		\
  "STACK_REG",			\
  "POINTER_REGS",		\
  "FP_LO8_REGS",		\
  "FP_LO_REGS",			\
  "FP_REGS",			\
  "POINTER_AND_FP_REGS",	\
  "PR_LO_REGS",			\
  "PR_HI_REGS",			\
  "PR_REGS",			\
  "FFR_REGS",			\
  "PR_AND_FFR_REGS",		\
  "ALL_REGS"			\
}

#define REG_CLASS_CONTENTS						\
{									\
  { 0x00000000, 0x00000000, 0x00000000 },	/* NO_REGS */		\
  { 0x00030000, 0x00000000, 0x00000000 },	/* TAILCALL_ADDR_REGS */ \
  { 0x3ffcffff, 0x00000000, 0x00000000 },	/* STUB_REGS */		\
  { 0x7fffffff, 0x00000000, 0x00000003 },	/* GENERAL_REGS */	\
  { 0x80000000, 0x00000000, 0x00000000 },	/* STACK_REG */		\
  { 0xffffffff, 0x00000000, 0x00000003 },	/* POINTER_REGS */	\
  { 0x00000000, 0x000000ff, 0x00000000 },	/* FP_LO8_REGS */	\
  { 0x00000000, 0x0000ffff, 0x00000000 },	/* FP_LO_REGS */	\
  { 0x00000000, 0xffffffff, 0x00000000 },	/* FP_REGS */		\
  { 0xffffffff, 0xffffffff, 0x00000003 },	/* POINTER_AND_FP_REGS */ \
  { 0x00000000, 0x00000000, 0x00000ff0 },	/* PR_LO_REGS */	\
  { 0x00000000, 0x00000000, 0x000ff000 },	/* PR_HI_REGS */	\
  { 0x00000000, 0x00000000, 0x000ffff0 },	/* PR_REGS */		\
  { 0x00000000, 0x00000000, 0x00300000 },	/* FFR_REGS */		\
  { 0x00000000, 0x00000000, 0x003ffff0 },	/* PR_AND_FFR_REGS */	\
  { 0xffffffff, 0xffffffff, 0x000fffff }	/* ALL_REGS */		\
}

#define REGNO_REG_CLASS(REGNO)	aarch64_regno_regclass (REGNO)

#define INDEX_REG_CLASS	GENERAL_REGS
#define BASE_REG_CLASS	POINTER_REGS
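/* Decoding example (added note): each class above is three 32-bit words
   covering hard registers 0-31, 32-63 and 64-95.  FP_LO8_REGS is
   { 0x00000000, 0x000000ff, 0x00000000 }, i.e. bits 0-7 of the second
   word, which are V0-V7.  */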
/* Register pairs used to eliminate unneeded registers that point into
   the stack frame.  */
#define ELIMINABLE_REGS							\
{									\
  { ARG_POINTER_REGNUM,		STACK_POINTER_REGNUM },			\
  { ARG_POINTER_REGNUM,		HARD_FRAME_POINTER_REGNUM },		\
  { FRAME_POINTER_REGNUM,	STACK_POINTER_REGNUM },			\
  { FRAME_POINTER_REGNUM,	HARD_FRAME_POINTER_REGNUM },		\
}

#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
  (OFFSET) = aarch64_initial_elimination_offset (FROM, TO)

/* CPU/ARCH option handling.  */
#include "config/aarch64/aarch64-opts.h"

enum target_cpus
{
#define AARCH64_CORE(NAME, INTERNAL_IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART, VARIANT) \
  TARGET_CPU_##INTERNAL_IDENT,
#include "aarch64-cores.def"
  TARGET_CPU_generic
};

/* If there is no CPU defined at configure, use generic as default.  */
#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT \
  (TARGET_CPU_generic | (AARCH64_CPU_DEFAULT_FLAGS << 6))
#endif

/* If a NOP is inserted before a multiply-accumulate insn, remember to adjust
   the length so that conditional branching code is updated appropriately.  */
#define ADJUST_INSN_LENGTH(insn, length)	\
  do						\
    {						\
      if (aarch64_madd_needs_nop (insn))	\
	length += 4;				\
    } while (0)

#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS)	\
  aarch64_final_prescan_insn (INSN);

/* The processor for which instructions should be scheduled.  */
extern enum aarch64_processor aarch64_tune;

/* RTL generation support.  */
#define INIT_EXPANDERS aarch64_init_expanders ()


/* Stack layout; function entry, exit and calling.  */
#define STACK_GROWS_DOWNWARD	1

#define FRAME_GROWS_DOWNWARD	1

#define ACCUMULATE_OUTGOING_ARGS	1

#define FIRST_PARM_OFFSET(FNDECL) 0

/* Fix for VFP */
#define LIBCALL_VALUE(MODE)  \
  gen_rtx_REG (MODE, FLOAT_MODE_P (MODE) ? V0_REGNUM : R0_REGNUM)

#define DEFAULT_PCC_STRUCT_RETURN 0
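/* Example (added note): a libcall returning DFmode yields
   gen_rtx_REG (DFmode, V0_REGNUM), i.e. d0, while a DImode result comes
   back in x0, matching the AAPCS64 return-register rules.  */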
#ifdef HAVE_POLY_INT_H
struct GTY (()) aarch64_frame
{
  poly_int64 reg_offset[LAST_SAVED_REGNUM + 1];

  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the
     frame.  This value is rounded up to a multiple of
     STACK_BOUNDARY.  */
  HOST_WIDE_INT saved_varargs_size;

  /* The size of the callee-save registers with a slot in REG_OFFSET.  */
  poly_int64 saved_regs_size;

  /* The size of the callee-save registers with a slot in REG_OFFSET that
     are saved below the hard frame pointer.  */
  poly_int64 below_hard_fp_saved_regs_size;

  /* Offset from the base of the frame (incoming SP) to the
     top of the locals area.  This value is always a multiple of
     STACK_BOUNDARY.  */
  poly_int64 locals_offset;

  /* Offset from the base of the frame (incoming SP) to the
     hard_frame_pointer.  This value is always a multiple of
     STACK_BOUNDARY.  */
  poly_int64 hard_fp_offset;

  /* The size of the frame.  This value is the offset from base of the
     frame (incoming SP) to the stack_pointer.  This value is always
     a multiple of STACK_BOUNDARY.  */
  poly_int64 frame_size;

  /* The size of the initial stack adjustment before saving callee-saves.  */
  poly_int64 initial_adjust;

  /* The writeback value when pushing callee-save registers.
     It is zero when no push is used.  */
  HOST_WIDE_INT callee_adjust;

  /* The offset from SP to the callee-save registers after initial_adjust.
     It may be non-zero if no push is used (i.e. callee_adjust == 0).  */
  poly_int64 callee_offset;

  /* The size of the stack adjustment before saving or after restoring
     SVE registers.  */
  poly_int64 sve_callee_adjust;

  /* The size of the stack adjustment after saving callee-saves.  */
  poly_int64 final_adjust;

  /* Store FP,LR and setup a frame pointer.  */
  bool emit_frame_chain;

  /* In each frame, we can associate up to two register saves with the
     initial stack allocation.  This happens in one of two ways:

     (1) Using an STR or STP with writeback to perform the initial
	 stack allocation.  When EMIT_FRAME_CHAIN, the registers will
	 be those needed to create a frame chain.

	 Indicated by CALLEE_ADJUST != 0.

     (2) Using a separate STP to set up the frame record, after the
	 initial stack allocation but before setting up the frame pointer.
	 This is used if the offset is too large to use writeback.

	 Indicated by CALLEE_ADJUST == 0 && EMIT_FRAME_CHAIN.

     These fields indicate which registers we've decided to handle using
     (1) or (2), or INVALID_REGNUM if none.  */
  unsigned wb_candidate1;
  unsigned wb_candidate2;

  /* Big-endian SVE frames need a spare predicate register in order
     to save vector registers in the correct layout for unwinding.
     This is the register they should use.  */
  unsigned spare_pred_reg;

  bool laid_out;
};

typedef struct GTY (()) machine_function
{
  struct aarch64_frame frame;
  /* One entry for each hard register.  */
  bool reg_is_wrapped_separately[LAST_SAVED_REGNUM];
  /* One entry for each general purpose register.  */
  rtx call_via[SP_REGNUM];
  bool label_is_assembled;
} machine_function;
#endif

/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_LP64 = 0,
  AARCH64_ABI_ILP32 = 1
};

#ifndef AARCH64_ABI_DEFAULT
#define AARCH64_ABI_DEFAULT AARCH64_ABI_LP64
#endif

#define TARGET_ILP32	(aarch64_abi & AARCH64_ABI_ILP32)
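/* Consequence worth noting (added observation): with -mabi=ilp32 the
   type macros near the top of this file give POINTER_SIZE ==
   LONG_TYPE_SIZE == 32 while Pmode stays DImode, so pointers are
   zero-extended to 64 bits for addressing (see
   POINTERS_EXTEND_UNSIGNED below).  */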
enum arm_pcs
{
  ARM_PCS_AAPCS64,		/* Base standard AAPCS for 64 bit.  */
  ARM_PCS_SIMD,			/* For aarch64_vector_pcs functions.  */
  ARM_PCS_SVE,			/* For functions that pass or return
				   values in SVE registers.  */
  ARM_PCS_TLSDESC,		/* For targets of tlsdesc calls.  */
  ARM_PCS_UNKNOWN
};


/* We can't use machine_mode inside a generator file because it
   hasn't been created yet; we shouldn't be using any code that
   needs the real definition though, so this ought to be safe.  */
#ifdef GENERATOR_FILE
#define MACHMODE int
#else
#include "insn-modes.h"
#define MACHMODE machine_mode
#endif

#ifndef USED_FOR_TARGET
/* AAPCS related state tracking.  */
typedef struct
{
  enum arm_pcs pcs_variant;
  int aapcs_arg_processed;	/* No need to lay out this argument again.  */
  int aapcs_ncrn;		/* Next Core register number.  */
  int aapcs_nextncrn;		/* Next next core register number.  */
  int aapcs_nvrn;		/* Next Vector register number.  */
  int aapcs_nextnvrn;		/* Next Next Vector register number.  */
  int aapcs_nprn;		/* Next Predicate register number.  */
  int aapcs_nextnprn;		/* Next Next Predicate register number.  */
  rtx aapcs_reg;		/* Register assigned to this argument.  This
				   is NULL_RTX if this parameter goes on
				   the stack.  */
  MACHMODE aapcs_vfp_rmode;
  int aapcs_stack_words;	/* If the argument is passed on the stack, this
				   is the number of words needed, after rounding
				   up.  Only meaningful when
				   aapcs_reg == NULL_RTX.  */
  int aapcs_stack_size;		/* The total size (in 8-byte words) of the
				   stack arg area so far.  */
  bool silent_p;		/* True if we should act silently, rather than
				   raise an error for invalid calls.  */
} CUMULATIVE_ARGS;
#endif

#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
  (aarch64_pad_reg_upward (MODE, TYPE, FIRST) ? PAD_UPWARD : PAD_DOWNWARD)

#define PAD_VARARGS_DOWN 0

#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
  aarch64_init_cumulative_args (&(CUM), FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS)

#define FUNCTION_ARG_REGNO_P(REGNO) \
  aarch64_function_arg_regno_p(REGNO)


/* ISA Features.  */

/* Addressing modes, etc.  */
#define HAVE_POST_INCREMENT	1
#define HAVE_PRE_INCREMENT	1
#define HAVE_POST_DECREMENT	1
#define HAVE_PRE_DECREMENT	1
#define HAVE_POST_MODIFY_DISP	1
#define HAVE_PRE_MODIFY_DISP	1

#define MAX_REGS_PER_ADDRESS	2

#define CONSTANT_ADDRESS_P(X)	aarch64_constant_address_p(X)

#define REGNO_OK_FOR_BASE_P(REGNO) \
  aarch64_regno_ok_for_base_p (REGNO, true)

#define REGNO_OK_FOR_INDEX_P(REGNO) \
  aarch64_regno_ok_for_index_p (REGNO, true)

#define LEGITIMATE_PIC_OPERAND_P(X) \
  aarch64_legitimate_pic_operand_p (X)

#define CASE_VECTOR_MODE Pmode

#define DEFAULT_SIGNED_CHAR 0

/* An integer expression for the size in bits of the largest integer machine
   mode that should actually be used.  We allow pairs of registers.  */
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)

/* Maximum bytes moved by a single instruction (load/store pair).  */
#define MOVE_MAX (UNITS_PER_WORD * 2)

/* The base cost overhead of a memcpy call, for MOVE_RATIO and friends.  */
#define AARCH64_CALL_RATIO 8

/* MOVE_RATIO dictates when we will use the move_by_pieces infrastructure.
   move_by_pieces will continually copy the largest safe chunks.  So a
   7-byte copy is a 4-byte + 2-byte + byte copy.  This proves inefficient
   for both size and speed of copy, so we will instead use the "cpymem"
   standard name to implement the copy.  This logic does not apply when
   targeting -mstrict-align, so keep a sensible default in that case.  */
#define MOVE_RATIO(speed) \
  (!STRICT_ALIGNMENT ? 2 : (((speed) ? 15 : AARCH64_CALL_RATIO) / 2))

/* Like MOVE_RATIO, without -mstrict-align, make decisions in "setmem" when
   we would use more than 3 scalar instructions.
   Otherwise follow a sensible default: when optimizing for size, give a better
   estimate of the length of a memset call, but use the default otherwise.  */
#define CLEAR_RATIO(speed) \
  (!STRICT_ALIGNMENT ? 4 : (speed) ? 15 : AARCH64_CALL_RATIO)

/* SET_RATIO is similar to CLEAR_RATIO, but for a non-zero constant.  Without
   -mstrict-align, make decisions in "setmem".  Otherwise follow a sensible
   default: when optimizing for size adjust the ratio to account for the
   overhead of loading the constant.  */
#define SET_RATIO(speed) \
  (!STRICT_ALIGNMENT ? 0 : (speed) ? 15 : AARCH64_CALL_RATIO - 2)
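/* Worked example (added note): without -mstrict-align MOVE_RATIO is 2,
   so all but the smallest copies bypass move_by_pieces and go through
   the "cpymem" standard pattern instead, as the comment above explains;
   with -mstrict-align and optimizing for speed the ratio is
   15 / 2 == 7 piece moves.  */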
/* Disable auto-increment in move_by_pieces et al.  Use of auto-increment is
   rarely a good idea in straight-line code since it adds an extra address
   dependency between each instruction.  Better to use incrementing
   offsets.  */
#define USE_LOAD_POST_INCREMENT(MODE)   0
#define USE_LOAD_POST_DECREMENT(MODE)   0
#define USE_LOAD_PRE_INCREMENT(MODE)    0
#define USE_LOAD_PRE_DECREMENT(MODE)    0
#define USE_STORE_POST_INCREMENT(MODE)  0
#define USE_STORE_POST_DECREMENT(MODE)  0
#define USE_STORE_PRE_INCREMENT(MODE)   0
#define USE_STORE_PRE_DECREMENT(MODE)   0

/* WORD_REGISTER_OPERATIONS does not hold for AArch64.
   The assigned word_mode is DImode but operations narrower than SImode
   behave as 32-bit operations if using the W-form of the registers rather
   than as word_mode (64-bit) operations as WORD_REGISTER_OPERATIONS
   expects.  */
#define WORD_REGISTER_OPERATIONS 0

/* Define if loading from memory in MODE, an integral mode narrower than
   BITS_PER_WORD will either zero-extend or sign-extend.  The value of this
   macro should be the code that says which one of the two operations is
   implicitly done, or UNKNOWN if none.  */
#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND

/* Define this macro to be non-zero if instructions will fail to work
   if given data not on the nominal alignment.  */
#define STRICT_ALIGNMENT	TARGET_STRICT_ALIGN

/* Enable wide bitfield accesses for more efficient bitfield code.  */
#define SLOW_BYTE_ACCESS 1

#define NO_FUNCTION_CSE	1

/* Specify the machine mode that the hardware addresses have.
   After generation of rtl, the compiler makes no further distinction
   between pointers and any other objects of this machine mode.  */
#define Pmode		DImode

/* A C expression whose value is zero if pointers that need to be extended
   from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended, greater
   than zero if they are zero-extended, and less than zero if the
   ptr_extend instruction should be used.  */
#define POINTERS_EXTEND_UNSIGNED 1

/* Mode of a function address in a call instruction (for indexing purposes).  */
#define FUNCTION_MODE	Pmode

#define SELECT_CC_MODE(OP, X, Y)	aarch64_select_cc_mode (OP, X, Y)

/* Having an integer comparison mode guarantees that we can use
   reverse_condition, but the usual restrictions apply to floating-point
   comparisons.  */
#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPmode && (MODE) != CCFPEmode)

#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
  ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
  ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
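/* Example (added note): for SImode, CLZ_DEFINED_VALUE_AT_ZERO sets VALUE
   to 32 (GET_MODE_UNIT_BITSIZE), matching what the CLZ instruction
   returns for a zero input; the result of 2 tells the optimizers the
   value is defined at the RTL level as well.  */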
#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)

#define RETURN_ADDR_RTX aarch64_return_addr

/* BTI c + 3 insns
   + sls barrier of DSB + ISB.
   + 2 pointer-sized entries.  */
#define TRAMPOLINE_SIZE	(24 + (TARGET_ILP32 ? 8 : 16))

/* Trampolines contain dwords, so must be dword aligned.  */
#define TRAMPOLINE_ALIGNMENT 64

/* Put trampolines in the text section so that mapping symbols work
   correctly.  */
#define TRAMPOLINE_SECTION text_section

/* To start with.  */
#define BRANCH_COST(SPEED_P, PREDICTABLE_P) \
  (aarch64_branch_cost (SPEED_P, PREDICTABLE_P))


/* Assembly output.  */

/* For now we'll make all jump tables pc-relative.  */
#define CASE_VECTOR_PC_RELATIVE	1

#define CASE_VECTOR_SHORTEN_MODE(min, max, body)	\
  ((min < -0x1fff0 || max > 0x1fff0) ? SImode		\
   : (min < -0x1f0 || max > 0x1f0) ? HImode		\
   : QImode)

/* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL.  */
#define ADDR_VEC_ALIGN(JUMPTABLE)	0

#define MCOUNT_NAME "_mcount"

#define NO_PROFILE_COUNTERS 1

/* Emit rtl for profiling.  Output assembler code to FILE
   to call "_mcount" for profiling a function entry.  */
#define PROFILE_HOOK(LABEL)					\
  {								\
    rtx fun, lr;						\
    lr = aarch64_return_addr_rtx ();				\
    fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME);		\
    emit_library_call (fun, LCT_NORMAL, VOIDmode, lr, Pmode);	\
  }

/* All the work done in PROFILE_HOOK, but still required.  */
#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0)

/* For some reason, the Linux headers think they know how to define
   these macros.  They don't!!!  */
#undef ASM_APP_ON
#undef ASM_APP_OFF
#define ASM_APP_ON "\t" ASM_COMMENT_START " Start of user assembly\n"
#define ASM_APP_OFF "\t" ASM_COMMENT_START " End of user assembly\n"

#define CONSTANT_POOL_BEFORE_FUNCTION 0

/* This definition should be relocated to aarch64-elf-raw.h.  This macro
   should be undefined in aarch64-linux.h and a clear_cache pattern
   implemented to emit either the call to __aarch64_sync_cache_range()
   directly or preferably the appropriate syscall or cache clear
   instructions inline.  */
#define CLEAR_INSN_CACHE(beg, end)				\
  extern void __aarch64_sync_cache_range (void *, void *);	\
  __aarch64_sync_cache_range (beg, end)

#define SHIFT_COUNT_TRUNCATED (!TARGET_SIMD)

/* Choose appropriate mode for caller saves, so we do the minimum
   required size of load/store.  */
#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
  aarch64_hard_regno_caller_save_mode ((REGNO), (NREGS), (MODE))

#undef SWITCHABLE_TARGET
#define SWITCHABLE_TARGET 1

/* Check TLS Descriptors mechanism is selected.  */
#define TARGET_TLS_DESC (aarch64_tls_dialect == TLS_DESCRIPTORS)

extern enum aarch64_code_model aarch64_cmodel;

/* When using the tiny addressing model conditional and unconditional branches
   can span the whole of the available address space (1MB).  */
#define HAS_LONG_COND_BRANCH				\
	(aarch64_cmodel == AARCH64_CMODEL_TINY		\
	 || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)

#define HAS_LONG_UNCOND_BRANCH				\
	(aarch64_cmodel == AARCH64_CMODEL_TINY		\
	 || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)

#define TARGET_SUPPORTS_WIDE_INT 1

/* Modes valid for AdvSIMD D registers, i.e. that fit in half a Q register.  */
#define AARCH64_VALID_SIMD_DREG_MODE(MODE) \
  ((MODE) == V2SImode || (MODE) == V4HImode || (MODE) == V8QImode \
   || (MODE) == V2SFmode || (MODE) == V4HFmode || (MODE) == DImode \
   || (MODE) == DFmode || (MODE) == V4BFmode)

/* Modes valid for AdvSIMD Q registers.  */
#define AARCH64_VALID_SIMD_QREG_MODE(MODE) \
  ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
   || (MODE) == V4SFmode || (MODE) == V8HFmode || (MODE) == V2DImode \
   || (MODE) == V2DFmode || (MODE) == V8BFmode)

#define ENDIAN_LANE_N(NUNITS, N) \
  (BYTES_BIG_ENDIAN ? NUNITS - 1 - N : N)
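/* Worked example (added note): for a 4-element vector on big-endian,
   ENDIAN_LANE_N (4, 0) == 3, i.e. GCC's first lane maps to the
   highest-numbered architectural lane; on little-endian the mapping is
   the identity.  */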
/* Support for configure-time --with-arch, --with-cpu and --with-tune.
   --with-arch and --with-cpu are ignored if either -mcpu or -march is used.
   --with-tune is ignored if either -mtune or -mcpu is used (but is not
   affected by -march).  */
#define OPTION_DEFAULT_SPECS				\
  {"arch", "%{!march=*:%{!mcpu=*:-march=%(VALUE)}}" },	\
  {"cpu",  "%{!march=*:%{!mcpu=*:-mcpu=%(VALUE)}}" },	\
  {"tune", "%{!mcpu=*:%{!mtune=*:-mtune=%(VALUE)}}"},

#define MCPU_TO_MARCH_SPEC \
   " %{mcpu=*:-march=%:rewrite_mcpu(%{mcpu=*:%*})}"

extern const char *aarch64_rewrite_mcpu (int argc, const char **argv);
#define MCPU_TO_MARCH_SPEC_FUNCTIONS \
  { "rewrite_mcpu", aarch64_rewrite_mcpu },

#if defined(__aarch64__)
extern const char *host_detect_local_cpu (int argc, const char **argv);
#define HAVE_LOCAL_CPU_DETECT
# define EXTRA_SPEC_FUNCTIONS				\
  { "local_cpu_detect", host_detect_local_cpu },	\
  MCPU_TO_MARCH_SPEC_FUNCTIONS

# define MCPU_MTUNE_NATIVE_SPECS				\
   " %{march=native:%<march=native %:local_cpu_detect(arch)}"	\
   " %{mcpu=native:%<mcpu=native %:local_cpu_detect(cpu)}"	\
   " %{mtune=native:%<mtune=native %:local_cpu_detect(tune)}"
#else
# define MCPU_MTUNE_NATIVE_SPECS ""
# define EXTRA_SPEC_FUNCTIONS MCPU_TO_MARCH_SPEC_FUNCTIONS
#endif

#define ASM_CPU_SPEC \
   MCPU_TO_MARCH_SPEC

#define EXTRA_SPECS						\
  { "asm_cpu_spec",		ASM_CPU_SPEC }

#define ASM_OUTPUT_POOL_EPILOGUE  aarch64_asm_output_pool_epilogue

/* This type is the user-visible __fp16, and a pointer to that type.  We
   need it in many places in the backend.  Defined in aarch64-builtins.c.  */
extern tree aarch64_fp16_type_node;
extern tree aarch64_fp16_ptr_type_node;

/* This type is the user-visible __bf16, and a pointer to that type.  Defined
   in aarch64-builtins.c.  */
extern tree aarch64_bf16_type_node;
extern tree aarch64_bf16_ptr_type_node;

/* The generic unwind code in libgcc does not initialize the frame pointer.
   So in order to unwind a function using a frame pointer, the very first
   function that is unwound must save the frame pointer.  That way the frame
   pointer is restored and its value is now valid - otherwise _Unwind_GetGR
   crashes.  Libgcc can now be safely built with -fomit-frame-pointer.  */
#define LIBGCC2_UNWIND_ATTRIBUTE \
  __attribute__((optimize ("no-omit-frame-pointer")))

#ifndef USED_FOR_TARGET
extern poly_uint16 aarch64_sve_vg;

/* The number of bits and bytes in an SVE vector.  */
#define BITS_PER_SVE_VECTOR (poly_uint16 (aarch64_sve_vg * 64))
#define BYTES_PER_SVE_VECTOR (poly_uint16 (aarch64_sve_vg * 8))

/* The number of bits and bytes in an SVE predicate.  */
#define BITS_PER_SVE_PRED BYTES_PER_SVE_VECTOR
#define BYTES_PER_SVE_PRED aarch64_sve_vg

/* The SVE mode for a vector of bytes.  */
#define SVE_BYTE_MODE VNx16QImode

/* The maximum number of bytes in a fixed-size vector.  This is 256 bytes
   (for -msve-vector-bits=2048) multiplied by the maximum number of
   vectors in a structure mode (4).

   This limit must not be used for variable-size vectors, since
   VL-agnostic code must work with arbitrary vector lengths.  */
#define MAX_COMPILE_TIME_VEC_BYTES (256 * 4)
#endif

#define REGMODE_NATURAL_SIZE(MODE) aarch64_regmode_natural_size (MODE)
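/* Example (added note): with -msve-vector-bits=256, aarch64_sve_vg is
   the constant 4 (four 64-bit granules), so BITS_PER_SVE_VECTOR is 256
   and BYTES_PER_SVE_PRED is 4.  In the default length-agnostic mode
   these remain poly_ints in the runtime VG.  */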
/* Allocate a minimum of STACK_CLASH_MIN_BYTES_OUTGOING_ARGS bytes for the
   outgoing arguments if stack clash protection is enabled.  This is essential
   as the extra arg space allows us to skip a check in alloca.  */
#undef STACK_DYNAMIC_OFFSET
#define STACK_DYNAMIC_OFFSET(FUNDECL)			\
   ((flag_stack_clash_protection			\
     && cfun->calls_alloca				\
     && known_lt (crtl->outgoing_args_size,		\
		  STACK_CLASH_MIN_BYTES_OUTGOING_ARGS))	\
    ? ROUND_UP (STACK_CLASH_MIN_BYTES_OUTGOING_ARGS,	\
		STACK_BOUNDARY / BITS_PER_UNIT)		\
    : (crtl->outgoing_args_size + STACK_POINTER_OFFSET))

#endif /* GCC_AARCH64_H */