// SPDX-License-Identifier: GPL-2.0-only
/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/swab.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/neon.h>
#include <asm/processor.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>

#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)

/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state of the task that was most recently
 * loaded onto the CPU, or NULL if kernel mode NEON has been performed after
 * that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
 * save the task's FPSIMD context back to task_struct from softirq context.
 * To prevent this from racing with the manipulation of the task's FPSIMD state
 * from task context and thereby corrupting the state, it is necessary to
 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
 * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
 * run but prevent them from using FPSIMD.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if both the task's fpsimd_cpu field
 *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
 *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
 *   cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */
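/*
 * Illustrative sketch only (not the actual entry path): the deferred
 * restore described above boils down to a check of this shape on the
 * way back to userland, mirroring fpsimd_restore_current_state() below:
 *
 *	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 *		task_fpsimd_load();		// registers were stale
 *		fpsimd_bind_task_to_cpu();	// re-establish (a) and (b)
 *	}
 *	// otherwise the registers already hold current's state: do nothing
 */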
struct fpsimd_last_state_struct {
	struct user_fpsimd_state *st;
	void *sve_state;
	unsigned int sve_vl;
};

static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);

/* Default VL for tasks that don't set it explicitly: */
static int __sve_default_vl = -1;

static int get_sve_default_vl(void)
{
	return READ_ONCE(__sve_default_vl);
}

#ifdef CONFIG_ARM64_SVE

static void set_sve_default_vl(int val)
{
	WRITE_ONCE(__sve_default_vl, val);
}

/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;

/*
 * Set of available vector lengths,
 * where length vq is encoded as bit __vq_to_bit(vq):
 */
__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
/* Set of vector lengths present on at least one cpu: */
static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);

static void __percpu *efi_sve_state;

#else /* ! CONFIG_ARM64_SVE */

/* Dummy declarations for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
extern void __percpu *efi_sve_state;

#endif /* ! CONFIG_ARM64_SVE */

DEFINE_PER_CPU(bool, fpsimd_context_busy);
EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);

static void __get_cpu_fpsimd_context(void)
{
	bool busy = __this_cpu_xchg(fpsimd_context_busy, true);

	WARN_ON(busy);
}

/*
 * Claim ownership of the CPU FPSIMD context for use by the calling context.
 *
 * The caller may freely manipulate the FPSIMD context metadata until
 * put_cpu_fpsimd_context() is called.
 *
 * The double-underscore version must only be called if you know the task
 * can't be preempted.
 */
static void get_cpu_fpsimd_context(void)
{
	local_bh_disable();
	__get_cpu_fpsimd_context();
}

static void __put_cpu_fpsimd_context(void)
{
	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);

	WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
}

/*
 * Release the CPU FPSIMD context.
 *
 * Must be called from a context in which get_cpu_fpsimd_context() was
 * previously called, with no call to put_cpu_fpsimd_context() in the
 * meantime.
 */
static void put_cpu_fpsimd_context(void)
{
	__put_cpu_fpsimd_context();
	local_bh_enable();
}

static bool have_cpu_fpsimd_context(void)
{
	return !preemptible() && __this_cpu_read(fpsimd_context_busy);
}
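
/*
 * Typical usage (sketch only; fpsimd_preserve_current_state() below is
 * a real caller of this pattern):
 *
 *	get_cpu_fpsimd_context();	// softirqs still run, but may not
 *					// touch the FPSIMD state
 *	fpsimd_save();
 *	... manipulate context metadata ...
 *	put_cpu_fpsimd_context();
 */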

/*
 * Call __sve_free() directly only if you know the task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}

/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 * in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked. If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    When stored, FPSIMD registers V0-V31 are encoded in
 *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view. For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced.
 *
 *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 *    irrespective of whether TIF_SVE is clear or set, since these are
 *    not vector length dependent.
 */

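/*
 * Worked example (illustrative): with TIF_SVE set and sve_vl == 32
 * (i.e. vq == 2), Z0 occupies bytes [0, 32) of sve_state and its low
 * 16 bytes alias V0; with TIF_SVE clear, only the 16-byte V0-V31 in
 * uw.fpsimd_state are meaningful. See the ZREG() macro and
 * __fpsimd_to_sve() below for the exact layout arithmetic.
 */
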
/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 */
static void task_fpsimd_load(void)
{
	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!have_cpu_fpsimd_context());

	if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE))
		sve_load_state(sve_pffr(&current->thread),
			       &current->thread.uw.fpsimd_state.fpsr,
			       sve_vq_from_vl(current->thread.sve_vl) - 1);
	else
		fpsimd_load_state(&current->thread.uw.fpsimd_state);
}

/*
 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
 * date with respect to the CPU registers.
 */
static void fpsimd_save(void)
{
	struct fpsimd_last_state_struct const *last =
		this_cpu_ptr(&fpsimd_last_state);
	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!have_cpu_fpsimd_context());

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		if (IS_ENABLED(CONFIG_ARM64_SVE) &&
		    test_thread_flag(TIF_SVE)) {
			if (WARN_ON(sve_get_vl() != last->sve_vl)) {
				/*
				 * Can't save the user regs, so current would
				 * re-enter user with corrupt state.
				 * There's no way to recover, so kill it:
				 */
				force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
				return;
			}

			sve_save_state((char *)last->sve_state +
						sve_ffr_offset(last->sve_vl),
				       &last->st->fpsr);
		} else
			fpsimd_save_state(last->st);
	}
}

/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(unsigned int vl)
{
	int bit;
	int max_vl = sve_max_vl;

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = SVE_VL_MIN;

	if (WARN_ON(!sve_vl_valid(max_vl)))
		max_vl = SVE_VL_MIN;

	if (vl > max_vl)
		vl = max_vl;

	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
			    __vq_to_bit(sve_vq_from_vl(vl)));
	return sve_vl_from_vq(__bit_to_vq(bit));
}
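
/*
 * Example (illustrative): on a system supporting vector lengths
 * {16, 32, 64} bytes, a request for vl == 48 maps to vq == 3. Because
 * __vq_to_bit() numbers larger VQs at lower bit positions,
 * find_next_bit() picks out the largest supported VQ <= 3, so the
 * function returns 32.
 */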

#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)

static int sve_proc_do_default_vl(struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;
	int vl = get_sve_default_vl();
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = sve_max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	set_sve_default_vl(find_supported_vector_length(vl));
	return 0;
}

static struct ctl_table sve_default_vl_table[] = {
	{
		.procname	= "sve_default_vector_length",
		.mode		= 0644,
		.proc_handler	= sve_proc_do_default_vl,
	},
	{ }
};

static int __init sve_sysctl_init(void)
{
	if (system_supports_sve())
		if (!register_sysctl("abi", sve_default_vl_table))
			return -EINVAL;

	return 0;
}

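/*
 * Example (from the shell, assuming SVE hardware and CONFIG_SYSCTL=y):
 *
 *	# cat /proc/sys/abi/sve_default_vector_length
 *	64
 *	# echo -1 > /proc/sys/abi/sve_default_vector_length	# set to max
 *
 * The value written is rounded to a supported vector length by
 * find_supported_vector_length() above.
 */
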
#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */

#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))

#ifdef CONFIG_CPU_BIG_ENDIAN
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	u64 a = swab64(x);
	u64 b = swab64(x >> 64);

	return ((__uint128_t)a << 64) | b;
}
#else
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	return x;
}
#endif

#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
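
/*
 * The sve_state buffer uses the signal frame layout: each 128-bit chunk
 * of a Z register is stored in little-endian byte order regardless of
 * the CPU's endianness, hence the conditional byte swap above.
 */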

static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
			    unsigned int vq)
{
	unsigned int i;
	__uint128_t *p;

	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t *)ZREG(sst, vq, i);
		*p = arm64_cpu_to_le128(fst->vregs[i]);
	}
}

/*
 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must be up to date before calling this
 * function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	__fpsimd_to_sve(sst, fst, vq);
}

/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.uw.fpsimd_state.
 *
 * Task can be a non-runnable task, or current. In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
static void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq;
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;
	__uint128_t const *p;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t const *)ZREG(sst, vq, i);
		fst->vregs[i] = arm64_le128_to_cpu(*p);
	}
}

#ifdef CONFIG_ARM64_SVE

/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
size_t sve_state_size(struct task_struct const *task)
{
	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
}
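
/*
 * Example: at the architectural maximum VL of 256 bytes (vq == 16),
 * SVE_SIG_REGS_SIZE(16) works out to 16 * (32 * 16 + 16 * 2 + 2) ==
 * 8736 bytes (Z0-Z31, P0-P15 and FFR), i.e. the "~8KB" upper bound
 * mentioned in sve_alloc() below.
 */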

/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data. The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability: except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by the task.
 */
void sve_alloc(struct task_struct *task)
{
	if (task->thread.sve_state) {
		memset(task->thread.sve_state, 0, sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);

	/*
	 * If future SVE revisions can have larger vectors though,
	 * this may cease to be true:
	 */
	BUG_ON(!task->thread.sve_state);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_sync_to_sve(struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SVE))
		fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace. task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void sve_sync_to_fpsimd(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the task->thread.uw.fpsimd_state.
 *
 * This should only be called by ptrace to merge new FPSIMD register
 * values into a task for which SVE is currently active.
 * task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must already have been initialised with
 * the new FPSIMD register values to be merged in.
 */
void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!test_tsk_thread_flag(task, TIF_SVE))
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);

	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
	__fpsimd_to_sve(sst, fst, vq);
}

int sve_set_vector_length(struct task_struct *task,
			  unsigned long vl, unsigned long flags)
{
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic SVE code can
	 * work with. A flag may be assigned in the future to allow setting
	 * of larger vector lengths without confusing older software.
	 */
	if (vl > SVE_VL_ARCH_MAX)
		vl = SVE_VL_ARCH_MAX;

	vl = find_supported_vector_length(vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task->thread.sve_vl_onexec = vl;
	else
		/* Reset VL to system default on next exec: */
		task->thread.sve_vl_onexec = 0;

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	if (vl == task->thread.sve_vl)
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * non-SVE thread.
	 */
	if (task == current) {
		get_cpu_fpsimd_context();

		fpsimd_save();
	}

	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);

	if (task == current)
		put_cpu_fpsimd_context();

	/*
	 * Force reallocation of task SVE state to the correct size
	 * on next use:
	 */
	sve_free(task);

	task->thread.sve_vl = vl;

out:
	update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
			       flags & PR_SVE_VL_INHERIT);

	return 0;
}

/*
 * Encode the current vector length and flags for return.
 * This is only required for prctl(): ptrace has separate fields.
 *
 * flags are as for sve_set_vector_length().
 */
static int sve_prctl_status(unsigned long flags)
{
	int ret;

	if (flags & PR_SVE_SET_VL_ONEXEC)
		ret = current->thread.sve_vl_onexec;
	else
		ret = current->thread.sve_vl;

	if (test_thread_flag(TIF_SVE_VL_INHERIT))
		ret |= PR_SVE_VL_INHERIT;

	return ret;
}

/* PR_SVE_SET_VL */
int sve_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SVE_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sve() || is_compat_task())
		return -EINVAL;

	ret = sve_set_vector_length(current, vl, flags);
	if (ret)
		return ret;

	return sve_prctl_status(flags);
}

/* PR_SVE_GET_VL */
int sve_get_current_vl(void)
{
	if (!system_supports_sve() || is_compat_task())
		return -EINVAL;

	return sve_prctl_status(0);
}
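
/*
 * Example (illustrative userspace usage of the two prctls above):
 *
 *	#include <sys/prctl.h>
 *
 *	// Request a 32-byte VL and keep it across execve():
 *	int ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *	if (ret >= 0)
 *		printf("VL is now %d\n",
 *		       prctl(PR_SVE_GET_VL) & PR_SVE_VL_LEN_MASK);
 *
 * Both prctls return the current VL encoded together with the flag
 * bits, as produced by sve_prctl_status() above.
 */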

static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;
	unsigned long zcr;

	bitmap_zero(map, SVE_VQ_MAX);

	zcr = ZCR_ELx_LEN_MASK;
	zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
		vl = sve_get_vl();
		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(__vq_to_bit(vq), map);
	}
}
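
/*
 * Example (illustrative): on a CPU implementing only {16, 32}-byte
 * vectors, the first pass above requests vq == SVE_VQ_MAX but
 * sve_get_vl() reads back 32, so the loop records vq == 2 and resumes
 * downwards from there, recording vq == 1 on the next pass.
 */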

/*
 * Initialise the set of known supported VQs for the boot CPU.
 * This is called during kernel boot, before secondary CPUs are brought up.
 */
void __init sve_init_vq_map(void)
{
	sve_probe_vqs(sve_vq_map);
	bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
}

/*
 * If we haven't committed to the set of supported VQs yet, filter out
 * those not supported by the current CPU.
 * This function is called during the bring-up of early secondary CPUs only.
 */
void sve_update_vq_map(void)
{
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);

	sve_probe_vqs(tmp_map);
	bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
	bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
}

/*
 * Check whether the current CPU supports all VQs in the committed set.
 * This function is called during the bring-up of late secondary CPUs only.
 */
int sve_verify_vq_map(void)
{
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	sve_probe_vqs(tmp_map);

	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
			smp_processor_id());
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return 0;

	/*
	 * For KVM, it is necessary to ensure that this CPU doesn't
	 * support any vector length that guests may have probed as
	 * unsupported.
	 */

	/* Recover the set of supported VQs: */
	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	/* Find VQs supported that are not globally supported: */
	bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);

	/* Find the lowest such VQ, if any: */
	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		return 0; /* no mismatches */

	/*
	 * Mismatches above sve_max_virtualisable_vl are fine, since
	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
	 */
	if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
		pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
			smp_processor_id());
		return -EINVAL;
	}

	return 0;
}

static void __init sve_efi_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI))
		return;

	/*
	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
	 * This is evidence of a crippled system, so no attempt is made to
	 * recover gracefully here: both failure paths simply panic.
	 */
	if (!sve_vl_valid(sve_max_vl))
		goto fail;

	efi_sve_state = __alloc_percpu(
		SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
	if (!efi_sve_state)
		goto fail;

	return;

fail:
	panic("Cannot allocate percpu memory for EFI SVE save/restore");
}

/*
 * Enable SVE for EL1.
 * Intended for use by the cpufeatures code during CPU boot.
 */
void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();
}

/*
 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
 * vector length.
 *
 * Use only if SVE is present.
 * This function clobbers the SVE vector length.
 */
u64 read_zcr_features(void)
{
	u64 zcr;
	unsigned int vq_max;

	/*
	 * Set the maximum possible VL, and write zeroes to all other
	 * bits to see if they stick.
	 */
	sve_kernel_enable(NULL);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);

	zcr = read_sysreg_s(SYS_ZCR_EL1);
	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
	vq_max = sve_vq_from_vl(sve_get_vl());
	zcr |= vq_max - 1; /* set LEN field to maximum effective value */

	return zcr;
}

void __init sve_setup(void)
{
	u64 zcr;
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
		set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);

	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);

	/*
	 * Sanity-check that the max VL we determined through CPU features
	 * corresponds properly to sve_vq_map. If not, do our best:
	 */
	if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
		sve_max_vl = find_supported_vector_length(sve_max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	set_sve_default_vl(find_supported_vector_length(64));

	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
		      SVE_VQ_MAX);

	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		/* No non-virtualisable VLs found */
		sve_max_virtualisable_vl = SVE_VQ_MAX;
	else if (WARN_ON(b == SVE_VQ_MAX - 1))
		/* No virtualisable VLs?  This is architecturally forbidden. */
		sve_max_virtualisable_vl = SVE_VQ_MIN;
	else /* b + 1 < SVE_VQ_MAX */
		sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));

	if (sve_max_virtualisable_vl > sve_max_vl)
		sve_max_virtualisable_vl = sve_max_vl;

	pr_info("SVE: maximum available vector length %u bytes per vector\n",
		sve_max_vl);
	pr_info("SVE: default vector length %u bytes per vector\n",
		get_sve_default_vl());

	/* KVM decides whether to support mismatched systems. Just warn here: */
	if (sve_max_virtualisable_vl < sve_max_vl)
		pr_warn("SVE: unvirtualisable vector lengths present\n");

	sve_efi_setup();
}

/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */

/*
 * Trapped SVE access
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and the access trap is
 * disabled.
 *
 * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	sve_alloc(current);

	get_cpu_fpsimd_context();

	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	/*
	 * Convert the FPSIMD state to SVE, zeroing all the state that
	 * is not shared with FPSIMD. If (as is likely) the current
	 * state is live in the registers then do this there and
	 * update our metadata for the current task including
	 * disabling the trap, otherwise update our in-memory copy.
	 */
	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		sve_set_vq(sve_vq_from_vl(current->thread.sve_vl) - 1);
		sve_flush_live();
		fpsimd_bind_task_to_cpu();
	} else {
		fpsimd_to_sve(current);
	}

	put_cpu_fpsimd_context();
}

/*
 * Trapped FP/ASIMD access.
 */
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
}

/*
 * Raise a SIGFPE for the current process.
 */
void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
	unsigned int si_code = FPE_FLTUNK;

	if (esr & ESR_ELx_FP_EXC_TFV) {
		if (esr & FPEXC_IOF)
			si_code = FPE_FLTINV;
		else if (esr & FPEXC_DZF)
			si_code = FPE_FLTDIV;
		else if (esr & FPEXC_OFF)
			si_code = FPE_FLTOVF;
		else if (esr & FPEXC_UFF)
			si_code = FPE_FLTUND;
		else if (esr & FPEXC_IXF)
			si_code = FPE_FLTRES;
	}

	send_sig_fault(SIGFPE, si_code,
		       (void __user *)instruction_pointer(regs),
		       current);
}

void fpsimd_thread_switch(struct task_struct *next)
{
	bool wrong_task, wrong_cpu;

	if (!system_supports_fpsimd())
		return;

	__get_cpu_fpsimd_context();

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/*
	 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
	 * state. For kernel threads, FPSIMD registers are never loaded
	 * and wrong_task and wrong_cpu will always be true.
	 */
	wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
					&next->thread.uw.fpsimd_state;
	wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();

	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
			       wrong_task || wrong_cpu);

	__put_cpu_fpsimd_context();
}

void fpsimd_flush_thread(void)
{
	int vl, supported_vl;

	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();

	fpsimd_flush_task_state(current);
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
		sve_free(current);

		/*
		 * Reset the task vector length as required.
		 * This is where we ensure that all user tasks have a valid
		 * vector length configured: no kernel task can become a user
		 * task without an exec and hence a call to this function.
		 * By the time the first call to this function is made, all
		 * early hardware probing is complete, so __sve_default_vl
		 * should be valid.
		 * If a bug causes this to go wrong, we make some noise and
		 * try to fudge thread.sve_vl to a safe value here.
		 */
		vl = current->thread.sve_vl_onexec ?
			current->thread.sve_vl_onexec : get_sve_default_vl();

		if (WARN_ON(!sve_vl_valid(vl)))
			vl = SVE_VL_MIN;

		supported_vl = find_supported_vector_length(vl);
		if (WARN_ON(supported_vl != vl))
			vl = supported_vl;

		current->thread.sve_vl = vl;

		/*
		 * If the task is not set to inherit, ensure that the vector
		 * length will be reset by a subsequent exec:
		 */
		if (!test_thread_flag(TIF_SVE_VL_INHERIT))
			current->thread.sve_vl_onexec = 0;
	}

	put_cpu_fpsimd_context();
}

/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();
	fpsimd_save();
	put_cpu_fpsimd_context();
}

/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.uw.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (test_thread_flag(TIF_SVE))
		sve_to_fpsimd(current);
}

/*
 * Associate current's FPSIMD context with this cpu.
 * The caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 */
void fpsimd_bind_task_to_cpu(void)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	last->st = &current->thread.uw.fpsimd_state;
	last->sve_state = current->thread.sve_state;
	last->sve_vl = current->thread.sve_vl;
	current->thread.fpsimd_cpu = smp_processor_id();

	if (system_supports_sve()) {
		/* Toggle SVE trapping for userspace if needed */
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();

		/* Serialised by exception return to user */
	}
}

void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
			      unsigned int sve_vl)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!in_softirq() && !irqs_disabled());

	last->st = st;
	last->sve_state = sve_state;
	last->sve_vl = sve_vl;
}

/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'
 */
void fpsimd_restore_current_state(void)
{
	/*
	 * For tasks that were created before we detected the absence of
	 * FP/SIMD, TIF_FOREIGN_FPSTATE could be set via
	 * fpsimd_thread_switch(), e.g. for init. This could then be
	 * inherited by child processes. If we later detect that the system
	 * doesn't support FP/SIMD, we must clear the flag for all tasks to
	 * indicate that the FPSTATE is clean (as we can't have one) to avoid
	 * looping forever in do_notify_resume().
	 */
	if (!system_supports_fpsimd()) {
		clear_thread_flag(TIF_FOREIGN_FPSTATE);
		return;
	}

	get_cpu_fpsimd_context();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		task_fpsimd_load();
		fpsimd_bind_task_to_cpu();
	}

	put_cpu_fpsimd_context();
}

/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	get_cpu_fpsimd_context();

	current->thread.uw.fpsimd_state = *state;
	if (test_thread_flag(TIF_SVE))
		fpsimd_to_sve(current);

	task_fpsimd_load();
	fpsimd_bind_task_to_cpu();

	clear_thread_flag(TIF_FOREIGN_FPSTATE);

	put_cpu_fpsimd_context();
}

/*
 * Invalidate live CPU copies of task t's FPSIMD state
 *
 * This function may be called with preemption enabled. The barrier()
 * ensures that the assignment to fpsimd_cpu is visible to any
 * preemption/softirq that could race with set_tsk_thread_flag(), so
 * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
 *
 * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
 * subsequent code.
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
	t->thread.fpsimd_cpu = NR_CPUS;
	/*
	 * If we don't support fpsimd, bail out after we have
	 * reset the fpsimd_cpu for this task and clear the
	 * FPSTATE.
	 */
	if (!system_supports_fpsimd())
		return;
	barrier();
	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);

	barrier();
}

/*
 * Invalidate any task's FPSIMD state that is present on this cpu.
 * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
 * before calling this function.
 */
static void fpsimd_flush_cpu_state(void)
{
	WARN_ON(!system_supports_fpsimd());
	__this_cpu_write(fpsimd_last_state.st, NULL);
	set_thread_flag(TIF_FOREIGN_FPSTATE);
}

/*
 * Save the FPSIMD state to memory and invalidate cpu view.
 * This function must be called with preemption disabled.
 */
void fpsimd_save_and_flush_cpu_state(void)
{
	if (!system_supports_fpsimd())
		return;
	WARN_ON(preemptible());
	__get_cpu_fpsimd_context();
	fpsimd_save();
	fpsimd_flush_cpu_state();
	__put_cpu_fpsimd_context();
}

#ifdef CONFIG_KERNEL_MODE_NEON

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	get_cpu_fpsimd_context();

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/* Invalidate any task state remaining in the fpsimd regs: */
	fpsimd_flush_cpu_state();
}
EXPORT_SYMBOL(kernel_neon_begin);

/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	if (!system_supports_fpsimd())
		return;

	put_cpu_fpsimd_context();
}
EXPORT_SYMBOL(kernel_neon_end);
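
/*
 * Example (illustrative; simplified from typical arm64 crypto glue
 * code): a kernel-mode NEON user wraps its SIMD region as follows,
 * keeping a scalar fallback for contexts where SIMD is unusable:
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		chacha_doneon(state, dst, src, bytes, nrounds);
 *		kernel_neon_end();
 *	} else {
 *		chacha_crypt_generic(state, dst, src, bytes, nrounds);
 *	}
 */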

#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);

/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */

/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd()) {
		kernel_neon_begin();
	} else {
		/*
		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
		 * preserving:
		 */
		if (system_supports_sve() && likely(efi_sve_state)) {
			char *sve_state = this_cpu_ptr(efi_sve_state);

			__this_cpu_write(efi_sve_state_used, true);

			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
		} else {
			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		}

		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}

/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
		kernel_neon_end();
	} else {
		if (system_supports_sve() &&
		    likely(__this_cpu_read(efi_sve_state_used))) {
			char const *sve_state = this_cpu_ptr(efi_sve_state);

			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       sve_vq_from_vl(sve_get_vl()) - 1);

			__this_cpu_write(efi_sve_state_used, false);
		} else {
			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
		}
	}
}

#endif /* CONFIG_EFI */

#endif /* CONFIG_KERNEL_MODE_NEON */

#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		fpsimd_save_and_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}

#else
static inline void fpsimd_hotplug_init(void) { }
#endif

/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (cpu_have_named_feature(FP)) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!cpu_have_named_feature(ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	return sve_sysctl_init();
}
core_initcall(fpsimd_init);