/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _UAPI__ASM_SIGCONTEXT_H
#define _UAPI__ASM_SIGCONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/types.h>

/*
 * Signal context structure - contains all info to do with the state
 * before the signal handler was invoked.
 */
struct sigcontext {
	__u64 fault_address;
	/* AArch64 registers */
	__u64 regs[31];
	__u64 sp;
	__u64 pc;
	__u64 pstate;
	/* 4K reserved for FP/SIMD state and future expansion */
	__u8 __reserved[4096] __attribute__((__aligned__(16)));
};

/*
 * Allocation of __reserved[]:
 * (Note: records do not necessarily occur in the order shown here.)
 *
 *	size		description
 *
 *	0x210		fpsimd_context
 *	 0x10		esr_context
 *	0x8a0		sve_context (vl <= 64) (optional)
 *	 0x20		extra_context (optional)
 *	 0x10		terminator (null _aarch64_ctx)
 *
 *	0x510		(reserved for future allocation)
 *
 * New records that can exceed this space need to be opt-in for userspace, so
 * that an expanded signal frame is not generated unexpectedly.  The mechanism
 * for opting in will depend on the extension that generates each new record.
 * The above table documents the maximum set and sizes of records that can be
 * generated when userspace does not opt in for any such extension.
 */

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 *
 * Note that the values allocated for use as magic should be chosen to
 * be meaningful in ASCII to aid manual parsing, ZA doesn't follow this
 * convention due to oversight but it should be observed for future additions.
 */
struct _aarch64_ctx {
	__u32 magic;
	__u32 size;
};

#define FPSIMD_MAGIC	0x46508001

struct fpsimd_context {
	struct _aarch64_ctx head;
	__u32 fpsr;
	__u32 fpcr;
	__uint128_t vregs[32];
};

/*
 * Note: similarly to all other integer fields, each V-register is stored in an
 * endianness-dependent format, with the byte at offset i from the start of the
 * in-memory representation of the register value containing
 *
 *    bits [(7 + 8 * i) : (8 * i)] of the register on little-endian hosts; or
 *    bits [(127 - 8 * i) : (120 - 8 * i)] on big-endian hosts.
 */

/* ESR_EL1 context */
#define ESR_MAGIC	0x45535201

struct esr_context {
	struct _aarch64_ctx head;
	__u64 esr;
};

/* POR_EL0 (permission overlay) context */
#define POE_MAGIC	0x504f4530

struct poe_context {
	struct _aarch64_ctx head;
	__u64 por_el0;
};

/*
 * extra_context: describes extra space in the signal frame for
 * additional structures that don't fit in sigcontext.__reserved[].
 *
 * Note:
 *
 * 1) fpsimd_context, esr_context and extra_context must be placed in
 * sigcontext.__reserved[] if present.  They cannot be placed in the
 * extra space.  Any other record can be placed either in the extra
 * space or in sigcontext.__reserved[], unless otherwise specified in
 * this file.
 *
 * 2) There must not be more than one extra_context.
 *
 * 3) If extra_context is present, it must be followed immediately in
 * sigcontext.__reserved[] by the terminating null _aarch64_ctx.
 *
 * 4) The extra space to which datap points must start at the first
 * 16-byte aligned address immediately after the terminating null
 * _aarch64_ctx that follows the extra_context structure in
 * __reserved[].  The extra space may overrun the end of __reserved[],
 * as indicated by a sufficiently large value for the size field.
 *
 * 5) The extra space must itself be terminated with a null
 * _aarch64_ctx.
 */
#define EXTRA_MAGIC	0x45585401

struct extra_context {
	struct _aarch64_ctx head;
	__u64 datap; /* 16-byte aligned pointer to extra space cast to __u64 */
	__u32 size; /* size in bytes of the extra space */
	__u32 __reserved[3];
};

#define SVE_MAGIC	0x53564501

struct sve_context {
	struct _aarch64_ctx head;
	__u16 vl;
	__u16 flags;
	__u16 __reserved[2];
};

#define SVE_SIG_FLAG_SM	0x1	/* Context describes streaming mode */

/* TPIDR2_EL0 context */
#define TPIDR2_MAGIC	0x54504902

struct tpidr2_context {
	struct _aarch64_ctx head;
	__u64 tpidr2;
};

/* FPMR context */
#define FPMR_MAGIC	0x46504d52

struct fpmr_context {
	struct _aarch64_ctx head;
	__u64 fpmr;
};

/* ZA matrix storage context (SME) */
#define ZA_MAGIC	0x54366345

struct za_context {
	struct _aarch64_ctx head;
	__u16 vl;
	__u16 __reserved[3];
};

/* ZT register context */
#define ZT_MAGIC	0x5a544e01

struct zt_context {
	struct _aarch64_ctx head;
	__u16 nregs;
	__u16 __reserved[3];
};

#endif /* !__ASSEMBLY__ */

#include <asm/sve_context.h>

/*
 * The SVE architecture leaves space for future expansion of the
 * vector length beyond its initial architectural limit of 2048 bits
 * (16 quadwords).
 *
 * See linux/Documentation/arch/arm64/sve.rst for a description of the VL/VQ
 * terminology.
 */
#define SVE_VQ_BYTES		__SVE_VQ_BYTES	/* bytes per quadword */

#define SVE_VQ_MIN		__SVE_VQ_MIN
#define SVE_VQ_MAX		__SVE_VQ_MAX

#define SVE_VL_MIN		__SVE_VL_MIN
#define SVE_VL_MAX		__SVE_VL_MAX

#define SVE_NUM_ZREGS		__SVE_NUM_ZREGS
#define SVE_NUM_PREGS		__SVE_NUM_PREGS

#define sve_vl_valid(vl)	__sve_vl_valid(vl)
#define sve_vq_from_vl(vl)	__sve_vq_from_vl(vl)
#define sve_vl_from_vq(vq)	__sve_vl_from_vq(vq)

/*
 * If the SVE registers are currently live for the thread at signal delivery,
 * sve_context.head.size >=
 *	SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl))
 * and the register data may be accessed using the SVE_SIG_*() macros.
 *
 * If sve_context.head.size <
 *	SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)),
 * the SVE registers were not live for the thread and no register data
 * is included: in this case, the SVE_SIG_*() macros should not be
 * used except for this check.
 *
 * The same convention applies when returning from a signal: a caller
 * will need to remove or resize the sve_context block if it wants to
 * make the SVE registers live when they were previously non-live or
 * vice-versa.  This may require the caller to allocate fresh
 * memory and/or move other context blocks in the signal frame.
 *
 * Changing the vector length during signal return is not permitted:
 * sve_context.vl must equal the thread's current vector length when
 * doing a sigreturn.
 *
 * On systems with support for SME the SVE register state may reflect either
 * streaming or non-streaming mode.  In streaming mode the streaming mode
 * vector length will be used and the flag SVE_SIG_FLAG_SM will be set in
 * the flags field.  It is permitted to enter or leave streaming mode in
 * a signal return, applications should take care to ensure that any difference
 * in vector length between the two modes is handled, including any resizing
 * and movement of context blocks.
 *
 * Note: for all these macros, the "vq" argument denotes the vector length
 * in quadwords (i.e., units of 128 bits).
 *
 * The correct way to obtain vq is to use sve_vq_from_vl(vl).  The
 * result is valid if and only if sve_vl_valid(vl) is true.  This is
 * guaranteed for a struct sve_context written by the kernel.
 *
 *
 * Additional macros describe the contents and layout of the payload.
 * For each, SVE_SIG_x_OFFSET(args) is the start offset relative to
 * the start of struct sve_context, and SVE_SIG_x_SIZE(args) is the
 * size in bytes:
 *
 *	x	type				description
 *	-	----				-----------
 *	REGS					the entire SVE context
 *
 *	ZREGS	__uint128_t[SVE_NUM_ZREGS][vq]	all Z-registers
 *	ZREG	__uint128_t[vq]			individual Z-register Zn
 *
 *	PREGS	uint16_t[SVE_NUM_PREGS][vq]	all P-registers
 *	PREG	uint16_t[vq]			individual P-register Pn
 *
 *	FFR	uint16_t[vq]			first-fault status register
 *
 * Additional data might be appended in the future.
 *
 * Unlike vregs[] in fpsimd_context, each SVE scalable register (Z-, P- or FFR)
 * is encoded in memory in an endianness-invariant format, with the byte at
 * offset i from the start of the in-memory representation containing bits
 * [(7 + 8 * i) : (8 * i)] of the register value.
 */

#define SVE_SIG_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
#define SVE_SIG_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
#define SVE_SIG_FFR_SIZE(vq)	__SVE_FFR_SIZE(vq)

/* Register payload starts at the first 16-byte boundary after the header */
#define SVE_SIG_REGS_OFFSET					\
	((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1))	\
		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)

#define SVE_SIG_ZREGS_OFFSET \
		(SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET)
#define SVE_SIG_ZREG_OFFSET(vq, n) \
		(SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
#define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq)

#define SVE_SIG_PREGS_OFFSET(vq) \
		(SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
#define SVE_SIG_PREG_OFFSET(vq, n) \
		(SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
#define SVE_SIG_PREGS_SIZE(vq) __SVE_PREGS_SIZE(vq)

#define SVE_SIG_FFR_OFFSET(vq) \
		(SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq))

#define SVE_SIG_REGS_SIZE(vq) \
		(__SVE_FFR_OFFSET(vq) + __SVE_FFR_SIZE(vq))

#define SVE_SIG_CONTEXT_SIZE(vq) \
		(SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))

/*
 * If the ZA register is enabled for the thread at signal delivery then,
 * za_context.head.size >= ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl))
 * and the register data may be accessed using the ZA_SIG_*() macros.
 *
 * If za_context.head.size < ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl))
 * then ZA was not enabled for the thread and no register data was included:
 * in this case, the ZA_SIG_*() macros should not be used except for this
 * check.
 *
 * The same convention applies when returning from a signal: a caller
 * will need to remove or resize the za_context block if it wants to
 * enable the ZA register when it was previously non-live or vice-versa.
 * This may require the caller to allocate fresh memory and/or move other
 * context blocks in the signal frame.
 *
 * Changing the vector length during signal return is not permitted:
 * za_context.vl must equal the thread's current SME vector length when
 * doing a sigreturn.
 */

/* Register payload starts at the first 16-byte boundary after the header */
#define ZA_SIG_REGS_OFFSET					\
	((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1))	\
		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)

/* ZA is a square byte matrix of (vq * 16) x (vq * 16) bytes */
#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES))

/* Offset of horizontal vector n within the ZA payload */
#define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
				  (SVE_SIG_ZREG_SIZE(vq) * (n)))

#define ZA_SIG_CONTEXT_SIZE(vq) \
		(ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))

/* Each ZT register is 512 bits (64 bytes) */
#define ZT_SIG_REG_SIZE 512

#define ZT_SIG_REG_BYTES (ZT_SIG_REG_SIZE / 8)

#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)

#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n))

#define ZT_SIG_CONTEXT_SIZE(n) \
	(sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))

#endif /* _UAPI__ASM_SIGCONTEXT_H */