/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef VFP
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>

#include <machine/armreg.h>
#include <machine/pcb.h>
#include <machine/vfp.h>

/*
 * Lazy save/restore of the arm64 VFP/SIMD register file (q0-q31 plus
 * fpcr/fpsr).  The unit is left disabled (access trapped) until a thread
 * touches it; vfp_restore_state() is then called from the trap handler to
 * load that thread's state.  Per-CPU ownership is tracked through
 * PCPU fpcurthread and pcb_vfpcpu.
 */

/* Sanity check we can store all the VFP registers */
CTASSERT(sizeof(((struct pcb *)0)->pcb_fpustate.vfp_regs) == 16 * 32);

static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
    "Kernel contexts for VFP state");

/*
 * Context handed out by fpu_kern_alloc_ctx() so in-kernel code may use the
 * VFP unit between fpu_kern_enter() and fpu_kern_leave() without destroying
 * the interrupted (usually userspace) state.
 */
struct fpu_kern_ctx {
	struct vfpstate	*prev;		/* pcb_fpusaved value to restore on leave */
#define	FPU_KERN_CTX_DUMMY	0x01	/* avoided save for the kern thread */
#define	FPU_KERN_CTX_INUSE	0x02	/* between enter and leave */
	uint32_t	 flags;
	struct vfpstate	 state;		/* storage for the kernel's VFP state */
};

/* Allow FP/SIMD access at EL0 and EL1 (stop trapping). */
static void
vfp_enable(void)
{
	uint32_t cpacr;

	cpacr = READ_SPECIALREG(cpacr_el1);
	cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_NONE;
	WRITE_SPECIALREG(cpacr_el1, cpacr);
	isb();
}

/* Trap all FP/SIMD access so the next use faults into the kernel. */
static void
vfp_disable(void)
{
	uint32_t cpacr;

	cpacr = READ_SPECIALREG(cpacr_el1);
	cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_ALL1;
	WRITE_SPECIALREG(cpacr_el1, cpacr);
	isb();
}

/*
 * Called when the thread is dying or when discarding the kernel VFP state.
 * If the thread was the last to use the VFP unit mark it as unused to tell
 * the kernel the fp state is unowned. Ensure the VFP unit is off so we get
 * an exception on the next access.
 */
void
vfp_discard(struct thread *td)
{

#ifdef INVARIANTS
	if (td != NULL)
		CRITICAL_ASSERT(td);
#endif
	/* Only clear ownership if this CPU's unit really belongs to td. */
	if (PCPU_GET(fpcurthread) == td)
		PCPU_SET(fpcurthread, NULL);

	vfp_disable();
}

/*
 * Dump the live VFP register file (q0-q31, fpcr, fpsr) into *state.
 * Caller must hold a critical section and have the unit enabled.
 *
 * NOTE(review): the asm stores through vfp_state but declares no "memory"
 * clobber; this presumably relies on the surrounding critical section and
 * the dsb() in vfp_save_state() for ordering -- confirm.
 */
static void
vfp_store(struct vfpstate *state)
{
	__int128_t *vfp_state;
	uint64_t fpcr, fpsr;

	vfp_state = state->vfp_regs;
	__asm __volatile(
	    "mrs %0, fpcr \n"
	    "mrs %1, fpsr \n"
	    "stp q0, q1, [%2, #16 * 0]\n"
	    "stp q2, q3, [%2, #16 * 2]\n"
	    "stp q4, q5, [%2, #16 * 4]\n"
	    "stp q6, q7, [%2, #16 * 6]\n"
	    "stp q8, q9, [%2, #16 * 8]\n"
	    "stp q10, q11, [%2, #16 * 10]\n"
	    "stp q12, q13, [%2, #16 * 12]\n"
	    "stp q14, q15, [%2, #16 * 14]\n"
	    "stp q16, q17, [%2, #16 * 16]\n"
	    "stp q18, q19, [%2, #16 * 18]\n"
	    "stp q20, q21, [%2, #16 * 20]\n"
	    "stp q22, q23, [%2, #16 * 22]\n"
	    "stp q24, q25, [%2, #16 * 24]\n"
	    "stp q26, q27, [%2, #16 * 26]\n"
	    "stp q28, q29, [%2, #16 * 28]\n"
	    "stp q30, q31, [%2, #16 * 30]\n"
	    : "=&r"(fpcr), "=&r"(fpsr) : "r"(vfp_state));

	state->vfp_fpcr = fpcr;
	state->vfp_fpsr = fpsr;
}

/*
 * Load the VFP register file (q0-q31, fpcr, fpsr) from *state.
 * Caller must hold a critical section and have the unit enabled.
 */
static void
vfp_restore(struct vfpstate *state)
{
	__int128_t *vfp_state;
	uint64_t fpcr, fpsr;

	vfp_state = state->vfp_regs;
	fpcr = state->vfp_fpcr;
	fpsr = state->vfp_fpsr;

	__asm __volatile(
	    "ldp q0, q1, [%2, #16 * 0]\n"
	    "ldp q2, q3, [%2, #16 * 2]\n"
	    "ldp q4, q5, [%2, #16 * 4]\n"
	    "ldp q6, q7, [%2, #16 * 6]\n"
	    "ldp q8, q9, [%2, #16 * 8]\n"
	    "ldp q10, q11, [%2, #16 * 10]\n"
	    "ldp q12, q13, [%2, #16 * 12]\n"
	    "ldp q14, q15, [%2, #16 * 14]\n"
	    "ldp q16, q17, [%2, #16 * 16]\n"
	    "ldp q18, q19, [%2, #16 * 18]\n"
	    "ldp q20, q21, [%2, #16 * 20]\n"
	    "ldp q22, q23, [%2, #16 * 22]\n"
	    "ldp q24, q25, [%2, #16 * 24]\n"
	    "ldp q26, q27, [%2, #16 * 26]\n"
	    "ldp q28, q29, [%2, #16 * 28]\n"
	    "ldp q30, q31, [%2, #16 * 30]\n"
	    "msr fpcr, %0 \n"
	    "msr fpsr, %1 \n"
	    : : "r"(fpcr), "r"(fpsr), "r"(vfp_state));
}

/*
 * Save the current VFP state into pcb->pcb_fpusaved if the unit is live,
 * then disable it.  With td == NULL the caller means curthread (savectx()
 * passes NULL during panic/dump).  No-op when access is already trapped,
 * i.e. the registers hold no state for this thread.
 */
void
vfp_save_state(struct thread *td, struct pcb *pcb)
{
	uint32_t cpacr;

	KASSERT(pcb != NULL, ("NULL vfp pcb"));
	KASSERT(td == NULL || td->td_pcb == pcb, ("Invalid vfp pcb"));

	/*
	 * savectx() will be called on panic with dumppcb as an argument,
	 * dumppcb doesn't have pcb_fpusaved set, so set it to save
	 * the VFP registers.
	 */
	if (pcb->pcb_fpusaved == NULL)
		pcb->pcb_fpusaved = &pcb->pcb_fpustate;

	if (td == NULL)
		td = curthread;

	critical_enter();
	/*
	 * Only store the registers if the VFP is enabled,
	 * i.e. return if we are trapping on FP access.
	 */
	cpacr = READ_SPECIALREG(cpacr_el1);
	if ((cpacr & CPACR_FPEN_MASK) == CPACR_FPEN_TRAP_NONE) {
		KASSERT(PCPU_GET(fpcurthread) == td,
		    ("Storing an invalid VFP state"));

		vfp_store(pcb->pcb_fpusaved);
		/* Make the stores visible before the unit is turned off. */
		dsb(ish);
		vfp_disable();
	}
	critical_exit();
}

/*
 * Called from the FP access trap: enable the unit for curthread and, if
 * ownership has moved since this thread last ran here, reload its saved
 * state.  If this CPU's registers still belong to curthread they are
 * reused as-is (the lazy-switch fast path).
 */
void
vfp_restore_state(void)
{
	struct pcb *curpcb;
	u_int cpu;

	critical_enter();

	cpu = PCPU_GET(cpuid);
	curpcb = curthread->td_pcb;
	curpcb->pcb_fpflags |= PCB_FP_STARTED;

	vfp_enable();

	/*
	 * If the previous thread on this cpu to use the VFP was not the
	 * current thread, or the current thread last used it on a different
	 * cpu we need to restore the old state.
	 */
	if (PCPU_GET(fpcurthread) != curthread || cpu != curpcb->pcb_vfpcpu) {

		vfp_restore(curthread->td_pcb->pcb_fpusaved);
		PCPU_SET(fpcurthread, curthread);
		curpcb->pcb_vfpcpu = cpu;
	}

	critical_exit();
}

/*
 * Probe for an FP unit at boot and leave it trapped so first use faults
 * into vfp_restore_state().
 */
void
vfp_init(void)
{
	uint64_t pfr;

	/* Check if there is a vfp unit present */
	pfr = READ_SPECIALREG(id_aa64pfr0_el1);
	if ((pfr & ID_AA64PFR0_FP_MASK) == ID_AA64PFR0_FP_NONE)
		return;

	/* Disable to be enabled when it's used */
	vfp_disable();
}

SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);

/*
 * Allocate a context for in-kernel VFP use.  With FPU_KERN_NOWAIT the
 * allocation may fail and return NULL; otherwise it sleeps.
 */
struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)
{
	struct fpu_kern_ctx *res;
	size_t sz;

	sz = sizeof(struct fpu_kern_ctx);
	res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
	    M_NOWAIT : M_WAITOK) | M_ZERO);
	return (res);
}

/* Free a context previously returned by fpu_kern_alloc_ctx(). */
void
fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
{

	KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
	/* XXXAndrew clear the memory ? */
	free(ctx, M_FPUKERN_CTX);
}

/*
 * Begin an in-kernel VFP section for td (must be curthread's pcb in
 * practice).  Saves any live user state and redirects pcb_fpusaved at the
 * ctx so a context switch preserves the kernel's registers.  With
 * FPU_KERN_NOCTX no ctx is used and the thread stays in a critical
 * section until fpu_kern_leave(); with FPU_KERN_KTHR a dedicated FPU
 * kernel thread (see fpu_kern_thread()) skips the save entirely.
 */
void
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
	    ("ctx is required when !FPU_KERN_NOCTX"));
	KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
	    ("using inuse ctx"));
	KASSERT((pcb->pcb_fpflags & PCB_FP_NOSAVE) == 0,
	    ("recursive fpu_kern_enter while in PCB_FP_NOSAVE state"));

	if ((flags & FPU_KERN_NOCTX) != 0) {
		/* Critical section is exited in fpu_kern_leave(). */
		critical_enter();
		if (curthread == PCPU_GET(fpcurthread)) {
			vfp_save_state(curthread, pcb);
		}
		PCPU_SET(fpcurthread, NULL);

		vfp_enable();
		pcb->pcb_fpflags |= PCB_FP_KERN | PCB_FP_NOSAVE |
		    PCB_FP_STARTED;
		return;
	}

	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
		/* Dedicated FPU kthread: nothing to save, mark ctx a dummy. */
		ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
		return;
	}
	/*
	 * Check that either we are already using the VFP in the kernel, or
	 * the saved state points to the default user space.
	 */
	KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) != 0 ||
	    pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Mangled pcb_fpusaved %x %p %p", pcb->pcb_fpflags,
	    pcb->pcb_fpusaved, &pcb->pcb_fpustate));
	ctx->flags = FPU_KERN_CTX_INUSE;
	vfp_save_state(curthread, pcb);
	ctx->prev = pcb->pcb_fpusaved;
	pcb->pcb_fpusaved = &ctx->state;
	pcb->pcb_fpflags |= PCB_FP_KERN;
	pcb->pcb_fpflags &= ~PCB_FP_STARTED;

	return;
}

/*
 * End an in-kernel VFP section: discard the kernel register contents and
 * restore pcb_fpusaved to the state saved by the matching
 * fpu_kern_enter().  Always returns 0.
 */
int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
	struct pcb *pcb;

	pcb = td->td_pcb;

	if ((pcb->pcb_fpflags & PCB_FP_NOSAVE) != 0) {
		/* FPU_KERN_NOCTX path: still inside enter's critical section. */
		KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
		KASSERT(PCPU_GET(fpcurthread) == NULL,
		    ("non-NULL fpcurthread for PCB_FP_NOSAVE"));
		CRITICAL_ASSERT(td);

		vfp_disable();
		pcb->pcb_fpflags &= ~(PCB_FP_NOSAVE | PCB_FP_STARTED);
		critical_exit();
	} else {
		KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
		    ("FPU context not inuse"));
		ctx->flags &= ~FPU_KERN_CTX_INUSE;

		/* A dummy ctx never redirected pcb_fpusaved; nothing to undo. */
		if (is_fpu_kern_thread(0) &&
		    (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
			return (0);
		KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0, ("dummy ctx"));
		critical_enter();
		vfp_discard(td);
		critical_exit();
		pcb->pcb_fpflags &= ~PCB_FP_STARTED;
		pcb->pcb_fpusaved = ctx->prev;
	}

	/* Leaving the outermost kernel section drops PCB_FP_KERN. */
	if (pcb->pcb_fpusaved == &pcb->pcb_fpustate) {
		pcb->pcb_fpflags &= ~PCB_FP_KERN;
	} else {
		KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) != 0,
		    ("unpaired fpu_kern_leave"));
	}

	return (0);
}

/*
 * Mark the calling kernel thread as a permanent VFP user so
 * fpu_kern_enter(FPU_KERN_KTHR) can skip save/restore.  Always returns 0.
 */
int
fpu_kern_thread(u_int flags)
{
	struct pcb *pcb = curthread->td_pcb;

	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
	    ("Only kthread may use fpu_kern_thread"));
	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Mangled pcb_fpusaved"));
	KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) == 0,
	    ("Thread already setup for the VFP"));
	pcb->pcb_fpflags |= PCB_FP_KERN;
	return (0);
}

/*
 * Return non-zero iff the calling thread is a kernel thread previously
 * registered with fpu_kern_thread().  The flags argument is unused.
 */
int
is_fpu_kern_thread(u_int flags)
{
	struct pcb *curpcb;

	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
		return (0);
	curpcb = curthread->td_pcb;
	return ((curpcb->pcb_fpflags & PCB_FP_KERN) != 0);
}
#endif