/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014 Ian Lepore <ian@freebsd.org>
 * Copyright (c) 2012 Mark Tinguely
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef VFP
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>

#include <machine/armreg.h>
#include <machine/elf.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/undefined.h>
#include <machine/vfp.h>

/* function prototypes */
static int vfp_bounce(u_int, u_int, struct trapframe *, int);
static void vfp_restore(struct vfp_state *);

extern int vfp_exists;
static struct undefined_handler vfp10_uh, vfp11_uh;
/* If true the VFP unit has 32 double registers, otherwise it has 16 */
static int is_d32;

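/*
 * Context for fpu_kern_enter()/fpu_kern_leave().  It records the save area
 * that was active on entry (prev), so kernel use of the VFP can be layered
 * over the user state, plus a save area for the kernel's own VFP state.
 */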
struct fpu_kern_ctx {
	struct vfp_state	*prev;
#define	FPU_KERN_CTX_DUMMY	0x01	/* avoided save for the kern thread */
#define	FPU_KERN_CTX_INUSE	0x02
	uint32_t	 flags;
	struct vfp_state	 state;
};

/*
 * About .fpu directives in this file...
 *
 * We should need simply .fpu vfpv3, but clang 3.5 has a quirk where setting
 * vfpv3 doesn't imply that vfp2 features are also available -- both have to be
 * explicitly set to get all the features of both.  This is probably a bug in
 * clang, so it may get fixed and require changes here some day.  Other changes
 * are probably coming in clang too, because there is email and open PRs
 * indicating they want to completely disable the ability to use .fpu and
 * similar directives in inline asm.  That would be catastrophic for us;
 * hopefully they come to their senses.  There was also some discussion of a
 * new syntax such as .push fpu=vfpv3; ...; .pop fpu; that would be ideal for
 * us, better than what we have now, really.
 *
 * For gcc, each .fpu directive completely overrides the prior directive, unlike
 * with clang, but luckily on gcc saying v3 implies all the v2 features as well.
 */

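/*
 * fmxr/fmrx move a value to/from the named VFP system register using the
 * vmsr/vmrs instructions; the .fpu directives make the register names
 * visible to the assembler (see the comment above).
 */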
#define fmxr(reg, val) \
    __asm __volatile("	.fpu vfpv2\n .fpu vfpv3\n"			\
		     "	vmsr	" __STRING(reg) ", %0"   :: "r"(val));

#define fmrx(reg) \
({ u_int val = 0;\
    __asm __volatile(" .fpu vfpv2\n .fpu vfpv3\n"			\
		     "	vmrs	%0, " __STRING(reg) : "=r"(val));	\
    val; \
})

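/*
 * Read and write the CPACR (coprocessor access control register,
 * CP15 c1/c0/2), which gates access to coprocessors 10 and 11 (the VFP).
 */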
static u_int
get_coprocessorACR(void)
{
	u_int val;
	__asm __volatile("mrc p15, 0, %0, c1, c0, 2" : "=r" (val) : : "cc");
	return (val);
}

static void
set_coprocessorACR(u_int val)
{
	__asm __volatile("mcr p15, 0, %0, c1, c0, 2\n\t"
	 : : "r" (val) : "cc");
	isb();
}

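/* Turn the VFP on/off by setting or clearing the FPEXC enable bit. */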
static void
vfp_enable(void)
{
	uint32_t fpexc;

	fpexc = fmrx(fpexc);
	fmxr(fpexc, fpexc | VFPEXC_EN);
	isb();
}

static void
vfp_disable(void)
{
	uint32_t fpexc;

	fpexc = fmrx(fpexc);
	fmxr(fpexc, fpexc & ~VFPEXC_EN);
	isb();
}

/* Called for each cpu. */
void
vfp_init(void)
{
	u_int fpsid, tmp;
	u_int coproc, vfp_arch;

	coproc = get_coprocessorACR();
	coproc |= COPROC10 | COPROC11;
	set_coprocessorACR(coproc);

	fpsid = fmrx(fpsid);		/* read the vfp system id */

	if (!(fpsid & VFPSID_HARDSOFT_IMP)) {
		vfp_exists = 1;
		is_d32 = 0;
		PCPU_SET(vfpsid, fpsid);	/* save the fpsid */
		elf_hwcap |= HWCAP_VFP;

		vfp_arch =
		    (fpsid & VFPSID_SUBVERSION2_MASK) >> VFPSID_SUBVERSION_OFF;

		if (vfp_arch >= VFP_ARCH3) {
			tmp = fmrx(mvfr0);
			PCPU_SET(vfpmvfr0, tmp);
			elf_hwcap |= HWCAP_VFPv3;

			if ((tmp & VMVFR0_RB_MASK) == 2) {
				elf_hwcap |= HWCAP_VFPD32;
				is_d32 = 1;
			} else
				elf_hwcap |= HWCAP_VFPv3D16;

			tmp = fmrx(mvfr1);
			PCPU_SET(vfpmvfr1, tmp);

			if (PCPU_GET(cpuid) == 0) {
				if ((tmp & VMVFR1_FZ_MASK) == 0x1) {
					/* Denormals arithmetic support */
					initial_fpscr &= ~VFPSCR_FZ;
					thread0.td_pcb->pcb_vfpstate.fpscr =
					    initial_fpscr;
				}
			}

			if ((tmp & VMVFR1_LS_MASK) >> VMVFR1_LS_OFF == 1 &&
			    (tmp & VMVFR1_I_MASK) >> VMVFR1_I_OFF == 1 &&
			    (tmp & VMVFR1_SP_MASK) >> VMVFR1_SP_OFF == 1)
				elf_hwcap |= HWCAP_NEON;
			if ((tmp & VMVFR1_FMAC_MASK) >> VMVFR1_FMAC_OFF == 1)
				elf_hwcap |= HWCAP_VFPv4;
		}

		/*
		 * Initialize the coprocessor 10 and 11 undefined-instruction
		 * handlers.  These are called to restore the registers and
		 * enable the VFP hardware.
		 */
		if (vfp10_uh.uh_handler == NULL) {
			vfp10_uh.uh_handler = vfp_bounce;
			vfp11_uh.uh_handler = vfp_bounce;
			install_coproc_handler_static(10, &vfp10_uh);
			install_coproc_handler_static(11, &vfp11_uh);
		}
	}
}

SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);

/*
 * Start the VFP unit, restore the VFP registers from the PCB and retry
 * the instruction.
 */
static int
vfp_bounce(u_int addr, u_int insn, struct trapframe *frame, int code)
{
	u_int cpu, fpexc;
	struct pcb *curpcb;
	ksiginfo_t ksi;

	if ((code & FAULT_USER) == 0)
		panic("undefined floating point instruction in supervisor mode");

	critical_enter();

	/*
	 * If the VFP is already on and we got an undefined instruction, then
	 * something tried to execute a truly invalid instruction that maps to
	 * the VFP.
	 */
	fpexc = fmrx(fpexc);
	if (fpexc & VFPEXC_EN) {
		/* Clear any exceptions */
		fmxr(fpexc, fpexc & ~(VFPEXC_EX | VFPEXC_FP2V));

		/* kill the process - we do not handle emulation */
		critical_exit();

		if (fpexc & VFPEXC_EX) {
			/* We have an exception, signal a SIGFPE */
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGFPE;
			if (fpexc & VFPEXC_UFC)
				ksi.ksi_code = FPE_FLTUND;
			else if (fpexc & VFPEXC_OFC)
				ksi.ksi_code = FPE_FLTOVF;
			else if (fpexc & VFPEXC_IOC)
				ksi.ksi_code = FPE_FLTINV;
			ksi.ksi_addr = (void *)addr;
			trapsignal(curthread, &ksi);
			return (0);
		}

		return (1);
	}

	/*
	 * If the last time this thread used the VFP it was on this core, and
	 * the last thread to use the VFP on this core was this thread, then
	 * the VFP state is valid; otherwise restore this thread's state to
	 * the VFP.
	 */
	fmxr(fpexc, fpexc | VFPEXC_EN);
	curpcb = curthread->td_pcb;
	cpu = PCPU_GET(cpuid);
	if (curpcb->pcb_vfpcpu != cpu || curthread != PCPU_GET(fpcurthread)) {
		vfp_restore(curpcb->pcb_vfpsaved);
		curpcb->pcb_vfpcpu = cpu;
		PCPU_SET(fpcurthread, curthread);
	}

	critical_exit();

	KASSERT(curpcb->pcb_vfpsaved == &curpcb->pcb_vfpstate,
	    ("Kernel VFP state in use when entering userspace"));

	return (0);
}

/*
 * Update the VFP state for a forked process or new thread. The PCB will
 * have been copied from the old thread.
 * The code is heavily based on arm64 logic.
 */
void
vfp_new_thread(struct thread *newtd, struct thread *oldtd, bool fork)
{
	struct pcb *newpcb;

	newpcb = newtd->td_pcb;

	/* Kernel threads start with clean VFP */
	if ((oldtd->td_pflags & TDP_KTHREAD) != 0) {
		newpcb->pcb_fpflags &=
		    ~(PCB_FP_STARTED | PCB_FP_KERN | PCB_FP_NOSAVE);
	} else {
		MPASS((newpcb->pcb_fpflags & (PCB_FP_KERN|PCB_FP_NOSAVE)) == 0);
		if (!fork) {
			newpcb->pcb_fpflags &= ~PCB_FP_STARTED;
		}
	}

	newpcb->pcb_vfpsaved = &newpcb->pcb_vfpstate;
	newpcb->pcb_vfpcpu = UINT_MAX;
}

/*
 * Restore the given state to the VFP hardware.
 */
static void
vfp_restore(struct vfp_state *vfpsave)
{
	uint32_t fpexc;

	/* On vfpv3 we may need to restore FPINST and FPINST2 */
	fpexc = vfpsave->fpexec;
	if (fpexc & VFPEXC_EX) {
		fmxr(fpinst, vfpsave->fpinst);
		if (fpexc & VFPEXC_FP2V)
			fmxr(fpinst2, vfpsave->fpinst2);
	}
	fmxr(fpscr, vfpsave->fpscr);

	__asm __volatile(
	    " .fpu	vfpv2\n"
	    " .fpu	vfpv3\n"
	    " vldmia	%0!, {d0-d15}\n"	/* d0-d15 */
	    " cmp	%1, #0\n"		/* -D16 or -D32? */
	    " vldmiane	%0!, {d16-d31}\n"	/* d16-d31 */
	    " addeq	%0, %0, #128\n"		/* skip missing regs */
	    : "+&r" (vfpsave) : "r" (is_d32) : "cc"
	    );

	fmxr(fpexc, fpexc);
}

/*
 * If the VFP is on, save its current state and turn it off if requested to do
 * so.  If the VFP is not on, the values at *vfpsave are left unchanged.  The
 * caller is responsible for preventing a context switch while this is running.
 */
void
vfp_store(struct vfp_state *vfpsave, boolean_t disable_vfp)
{
	uint32_t fpexc;

	fpexc = fmrx(fpexc);		/* Is the vfp enabled? */
	if (fpexc & VFPEXC_EN) {
		vfpsave->fpexec = fpexc;
		vfpsave->fpscr = fmrx(fpscr);

		/* On vfpv3 we may need to save FPINST and FPINST2 */
		if (fpexc & VFPEXC_EX) {
			vfpsave->fpinst = fmrx(fpinst);
			if (fpexc & VFPEXC_FP2V)
				vfpsave->fpinst2 = fmrx(fpinst2);
			fpexc &= ~VFPEXC_EX;
		}

		__asm __volatile(
		    " .fpu	vfpv2\n"
		    " .fpu	vfpv3\n"
		    " vstmia	%0!, {d0-d15}\n"	/* d0-d15 */
		    " cmp	%1, #0\n"		/* -D16 or -D32? */
		    " vstmiane	%0!, {d16-d31}\n"	/* d16-d31 */
		    " addeq	%0, %0, #128\n"		/* skip missing regs */
		    : "+&r" (vfpsave) : "r" (is_d32) : "cc"
		    );

		if (disable_vfp)
			fmxr(fpexc, fpexc & ~VFPEXC_EN);
	}
}

/*
 * The current thread is dying.  If the state currently in the hardware belongs
 * to the current thread, set fpcurthread to NULL to indicate that the VFP
 * hardware state does not belong to any thread.  If the VFP is on, turn it off.
 */
void
vfp_discard(struct thread *td)
{
	u_int tmp;

	if (PCPU_GET(fpcurthread) == td)
		PCPU_SET(fpcurthread, NULL);

	tmp = fmrx(fpexc);
	if (tmp & VFPEXC_EN)
		fmxr(fpexc, tmp & ~VFPEXC_EN);
}

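/*
 * Save the current VFP state into the PCB's save area if the VFP is enabled.
 * Called with td NULL and pcb pointing at dumppcb from savectx() on panic.
 */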
void
vfp_save_state(struct thread *td, struct pcb *pcb)
{
	uint32_t fpexc;

	KASSERT(pcb != NULL, ("NULL vfp pcb"));
	KASSERT(td == NULL || td->td_pcb == pcb, ("Invalid vfp pcb"));

	/*
	 * savectx() will be called on panic with dumppcb as an argument;
	 * dumppcb doesn't have pcb_vfpsaved set, so set it to save
	 * the VFP registers.
	 */
	if (pcb->pcb_vfpsaved == NULL)
		pcb->pcb_vfpsaved = &pcb->pcb_vfpstate;

	if (td == NULL)
		td = curthread;

	critical_enter();
	/*
	 * Only store the registers if the VFP is enabled,
	 * i.e. return if we are trapping on FP access.
	 */
	fpexc = fmrx(fpexc);
	if (fpexc & VFPEXC_EN) {
		KASSERT(PCPU_GET(fpcurthread) == td,
		    ("Storing an invalid VFP state"));

		vfp_store(pcb->pcb_vfpsaved, true);
	}
	critical_exit();
}

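/*
 * Enter a kernel section that will use the VFP.  With FPU_KERN_NOCTX the
 * current state is saved and the VFP is handed to the kernel with no save
 * area; otherwise the thread's save pointer is switched to ctx->state, with
 * ctx->prev preserving the prior state across the kernel's VFP use.
 */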
void
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
	    ("ctx is required when !FPU_KERN_NOCTX"));
	KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
	    ("using inuse ctx"));
	KASSERT((pcb->pcb_fpflags & PCB_FP_NOSAVE) == 0,
	    ("recursive fpu_kern_enter while in PCB_FP_NOSAVE state"));

	if ((flags & FPU_KERN_NOCTX) != 0) {
		critical_enter();
		if (curthread == PCPU_GET(fpcurthread)) {
			vfp_save_state(curthread, pcb);
		}
		PCPU_SET(fpcurthread, NULL);

		vfp_enable();
		pcb->pcb_fpflags |= PCB_FP_KERN | PCB_FP_NOSAVE |
		    PCB_FP_STARTED;
		return;
	}

	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
		ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
		return;
	}
	/*
	 * Check that either we are already using the VFP in the kernel, or
	 * the saved state points to the default user space.
	 */
	KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) != 0 ||
	    pcb->pcb_vfpsaved == &pcb->pcb_vfpstate,
	    ("Mangled pcb_vfpsaved %x %p %p", pcb->pcb_fpflags, pcb->pcb_vfpsaved,
	     &pcb->pcb_vfpstate));
	ctx->flags = FPU_KERN_CTX_INUSE;
	vfp_save_state(curthread, pcb);
	ctx->prev = pcb->pcb_vfpsaved;
	pcb->pcb_vfpsaved = &ctx->state;
	pcb->pcb_fpflags |= PCB_FP_KERN;
	pcb->pcb_fpflags &= ~PCB_FP_STARTED;
}

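/*
 * Leave a kernel section that used the VFP, discarding any kernel VFP state
 * and restoring the thread's previous save area from ctx->prev (or just
 * disabling the VFP again in the FPU_KERN_NOCTX case).
 */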
int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
	struct pcb *pcb;

	pcb = td->td_pcb;

	if ((pcb->pcb_fpflags & PCB_FP_NOSAVE) != 0) {
		KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
		KASSERT(PCPU_GET(fpcurthread) == NULL,
		    ("non-NULL fpcurthread for PCB_FP_NOSAVE"));
		CRITICAL_ASSERT(td);

		vfp_disable();
		pcb->pcb_fpflags &= ~(PCB_FP_NOSAVE | PCB_FP_STARTED);
		critical_exit();
	} else {
		KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
		    ("FPU context not inuse"));
		ctx->flags &= ~FPU_KERN_CTX_INUSE;

		if (is_fpu_kern_thread(0) &&
		    (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
			return (0);
		KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0, ("dummy ctx"));
		critical_enter();
		vfp_discard(td);
		critical_exit();
		pcb->pcb_fpflags &= ~PCB_FP_STARTED;
		pcb->pcb_vfpsaved = ctx->prev;
	}

	if (pcb->pcb_vfpsaved == &pcb->pcb_vfpstate) {
		pcb->pcb_fpflags &= ~PCB_FP_KERN;
	} else {
		KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) != 0,
		    ("unpaired fpu_kern_leave"));
	}

	return (0);
}

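/*
 * Mark the current kernel thread as a permanent VFP user, allowing
 * fpu_kern_enter() with FPU_KERN_KTHR to skip the save/restore work for it.
 */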
int
fpu_kern_thread(u_int flags __unused)
{
	struct pcb *pcb = curthread->td_pcb;

	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
	    ("Only kthread may use fpu_kern_thread"));
	KASSERT(pcb->pcb_vfpsaved == &pcb->pcb_vfpstate,
	    ("Mangled pcb_vfpsaved"));
	KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) == 0,
	    ("Thread already setup for the VFP"));
	pcb->pcb_fpflags |= PCB_FP_KERN;
	return (0);
}

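/*
 * Return non-zero if the current thread is a kernel thread previously
 * registered with fpu_kern_thread().
 */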
int
is_fpu_kern_thread(u_int flags __unused)
{
	struct pcb *curpcb;

	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
		return (0);
	curpcb = curthread->td_pcb;
	return ((curpcb->pcb_fpflags & PCB_FP_KERN) != 0);
}

#endif /* VFP */