/*
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * Copyright (c) 2006 The DragonFly Project.
 * Copyright (c) 2006 Matthew Dillon.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)npx.c	7.2 (Berkeley) 5/12/91
 * $FreeBSD: src/sys/i386/isa/npx.c,v 1.80.2.3 2001/10/20 19:04:38 tegge Exp $
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/signalvar.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/globaldata.h>

#define fldcw(addr)	__asm("fldcw %0" : : "m" (*(addr)))
#define fnclex()	__asm("fnclex")
#define fninit()	__asm("fninit")
#define fnop()		__asm("fnop")
#define fnsave(addr)	__asm __volatile("fnsave %0" : "=m" (*(addr)))
#define fnstcw(addr)	__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define fnstsw(addr)	__asm __volatile("fnstsw %0" : "=m" (*(addr)))
#define frstor(addr)	__asm("frstor %0" : : "m" (*(addr)))
#define fxrstor(addr)	__asm("fxrstor %0" : : "m" (*(addr)))
#define fxsave(addr)	__asm __volatile("fxsave %0" : "=m" (*(addr)))
#define ldmxcsr(csr)	__asm __volatile("ldmxcsr %0" : : "m" (csr))
#ifndef CPU_DISABLE_AVX
/* xsave/xrstor encoded as raw bytes, for assemblers lacking the mnemonics */
#define xrstor(eax,edx,addr) \
	__asm __volatile(".byte 0x0f,0xae,0x2f" : : \
			 "D" (addr), "a" (eax), "d" (edx))
#define xsave(eax,edx,addr) \
	__asm __volatile(".byte 0x0f,0xae,0x27" : : \
			 "D" (addr), "a" (eax), "d" (edx) : "memory")
#endif
#define start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
				      : : "n" (CR0_TS) : "ax")
#define stop_emulating()	__asm("clts")

typedef u_char bool_t;

static void fpu_clean_state(void);
static void fpusave(union savefpu *);
static void fpurstor(union savefpu *);

static struct krate badfprate = { 1 };

uint32_t npx_mxcsr_mask = 0xFFBF;	/* this is the default */

static int npx_fpu_heuristic = 32;
SYSCTL_INT(_machdep, OID_AUTO, npx_fpu_heuristic, CTLFLAG_RW,
	   &npx_fpu_heuristic, 0, "FPU active restore 0=never 1=always N=after-N");

/*
 * Probe the npx_mxcsr_mask as described in the Intel document
 * "Intel Processor Identification and the CPUID Instruction",
 * Section 7, "Denormals Are Zero".
 *
 * Note that for fxsave to work reliably, the OS support bit for
 * FXSAVE/FXRSTOR operations in CR4 has to be set, per the
 * Intel 64 and IA-32 Architectures Software Developer's Manual,
 * Vol. 1, 10.5.1.2.
 */
void
npxprobemask(void)
{
	/* 64-byte alignment required for xsave */
	static union savefpu dummy __aligned(64);

	crit_enter();
	stop_emulating();
	load_cr4(rcr4() | CR4_FXSR);
	fxsave(&dummy);
	npx_mxcsr_mask = ((uint32_t *)&dummy)[7];
	start_emulating();
	crit_exit();
}
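
/*
 * Layout note (added commentary, not in the original file): the fxsave
 * image stores the MXCSR_MASK field in bytes 28-31, i.e. 32-bit word
 * index 7, which is why the probe above reads ((uint32_t *)&dummy)[7].
 * Per the Intel note referenced above, a mask of zero in the image
 * means the processor reports no mask and the default (0xFFBF) applies.
 */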

/*
 * Initialize the floating point unit.
 */
void
npxinit(void)
{
	/* 64-byte alignment required for xsave */
	static union savefpu dummy __aligned(64);
	u_short control = __INITIAL_FPUCW__;
	u_int mxcsr = __INITIAL_MXCSR__;

	/*
	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
	 * fnsave to throw away any junk in the fpu.  npxsave() initializes
	 * the fpu and sets npxthread = NULL as important side effects.
	 */
	npxsave(&dummy);
	crit_enter();
	stop_emulating();
	fldcw(&control);
	ldmxcsr(mxcsr);
	fpusave(curthread->td_savefpu);
	mdcpu->gd_npxthread = NULL;
	start_emulating();
	crit_exit();
}

/*
 * Free coprocessor (if we have it).
 */
void
npxexit(void)
{
	if (curthread == mdcpu->gd_npxthread)
		npxsave(curthread->td_savefpu);
}

#if 0
/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 128 entries (index 0 is
 * unused).  Each combination of the 7 FPU status word exception bits
 * directly translates to a position in this table, where a single
 * FPE_... value is stored.  This FPE_... value stored there is
 * considered the "most important" of the exception bits and will be
 * sent as the signal code.  The precedence of the bits is based upon
 * Intel Document "Numerical Applications", Chapter "Special
 * Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 * (A sketch of this lookup follows the table below.)
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 *   1   Invalid operation (FP_X_INV)
 *   1a    Stack underflow
 *   1b    Stack overflow
 *   1c    Operand of unsupported format
 *   1d    SNaN operand
 *   2   QNaN operand (not an exception, irrelevant here)
 *   3   Any other invalid-operation not mentioned above or zero divide
 *         (FP_X_INV, FP_X_DZ)
 *   4   Denormal operand (FP_X_DNML)
 *   5   Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 *   6   Inexact result (FP_X_IMP)
 */
static char fpetable[128] = {
	0,
	FPE_FLTINV,	/*  1 - INV */
	FPE_FLTUND,	/*  2 - DNML */
	FPE_FLTINV,	/*  3 - INV | DNML */
	FPE_FLTDIV,	/*  4 - DZ */
	FPE_FLTINV,	/*  5 - INV | DZ */
	FPE_FLTDIV,	/*  6 - DNML | DZ */
	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
	FPE_FLTOVF,	/*  8 - OFL */
	FPE_FLTINV,	/*  9 - INV | OFL */
	FPE_FLTUND,	/*  A - DNML | OFL */
	FPE_FLTINV,	/*  B - INV | DNML | OFL */
	FPE_FLTDIV,	/*  C - DZ | OFL */
	FPE_FLTINV,	/*  D - INV | DZ | OFL */
	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};
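
/*
 * A minimal sketch of the lookup described above (added illustration,
 * not part of the original file; the helper name npx_fpe_code is
 * hypothetical and the index computation is believed to follow the
 * historical FreeBSD npx interrupt handler).  "status" and "control"
 * are the FPU status and control words as fetched via fnstsw/fnstcw.
 */
static int
npx_fpe_code(u_short status, u_short control)
{
	/*
	 * ~control & 0x3f keeps only the unmasked exception bits,
	 * | 0x40 always preserves the stack-fault bit, and the result
	 * masks the status word to form the index into fpetable[].
	 */
	return (fpetable[status & ((~control & 0x3f) | 0x40)]);
}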

#endif

/*
 * Implement the device not available (DNA) exception.  gd_npxthread had
 * better be NULL.  Restore the current thread's FP state and set
 * gd_npxthread to curthread.
 *
 * Interrupts are enabled and preemption can occur.  Enter a critical
 * section to stabilize the FP state.
 */
int
npxdna(void)
{
	struct mdglobaldata *md = mdcpu;
	thread_t td;
	int didinit = 0;

	td = md->mi.gd_curthread;

	/*
	 * npxthread is almost always NULL.  When it isn't NULL it can
	 * only be exactly equal to 'td'.  This case occurs when the switch
	 * code proactively restores the FPU state due to the trap() code
	 * being interruptible (e.g. by an interrupt thread).
	 */
	if (__predict_false(md->gd_npxthread != NULL)) {
		if (md->gd_npxthread == td) {
			return 1;
		}
		kprintf("npxdna: npxthread = %p, curthread = %p\n",
			md->gd_npxthread, td);
		panic("npxdna");
	}

	/*
	 * Set up the initial saved state if the thread has never before
	 * used the FP unit.  This also occurs when a thread pushes a
	 * signal handler and uses FP in the handler.
	 */
	crit_enter();
	if ((td->td_flags & (TDF_USINGFP | TDF_KERNELFP)) == 0) {
		td->td_flags |= TDF_USINGFP;
		npxinit();
		didinit = 1;
	}

	/*
	 * Actively restore the fpu state after N npxdna faults instead of
	 * soaking the npxdna fault overhead on each switch.
	 */
	if (npx_fpu_heuristic && ++td->td_fpu_heur >= npx_fpu_heuristic) {
		td->td_fpu_heur = npx_fpu_heuristic;
		td->td_flags |= TDF_FPU_HEUR;
	}

	/*
	 * The setting of gd_npxthread and the call to fpurstor() must not
	 * be preempted by an interrupt thread or we will take an npxdna
	 * trap and potentially save our current fpstate (which is garbage)
	 * and then restore the garbage rather than the originally saved
	 * fpstate.
	 */
	stop_emulating();

	/*
	 * Record new context early in case frstor causes an IRQ13.
	 */
	md->gd_npxthread = td;

	/*
	 * The following frstor may cause an IRQ13 when the state being
	 * restored has a pending error.  The error will appear to have been
	 * triggered by the current (npx) user instruction even when that
	 * instruction is a no-wait instruction that should not trigger an
	 * error (e.g., fnclex).  On at least one 486 system all of the
	 * no-wait instructions are broken the same as frstor, so our
	 * treatment does not amplify the breakage.  On at least one
	 * 386/Cyrix 387 system, fnclex works correctly while frstor and
	 * fnsave are broken, so our treatment breaks fnclex if it is the
	 * first FPU instruction after a context switch.
	 */
	if ((td->td_savefpu->sv_xmm.sv_env.en_mxcsr & ~npx_mxcsr_mask) &&
	    cpu_fxsr) {
		krateprintf(&badfprate,
			    "%s: FXRSTR: illegal FP MXCSR %08x didinit = %d\n",
			    td->td_comm,
			    td->td_savefpu->sv_xmm.sv_env.en_mxcsr,
			    didinit);
		td->td_savefpu->sv_xmm.sv_env.en_mxcsr &= npx_mxcsr_mask;
		lwpsignal(td->td_proc, td->td_lwp, SIGFPE);
	}
	fpurstor(td->td_savefpu);
	crit_exit();

	return (1);
}
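
/*
 * Worked example of the MXCSR sanity check above (added commentary,
 * not in the original file): with the default npx_mxcsr_mask of
 * 0xFFBF, ~npx_mxcsr_mask is 0xFFFF0040, so the power-on MXCSR value
 * 0x1F80 passes (0x1F80 & 0xFFFF0040 == 0), while a value with the
 * unsupported DAZ bit set, e.g. 0x1FC0, fails (result 0x0040) and
 * draws the SIGFPE plus mask-and-restore treatment above.  Restoring
 * reserved MXCSR bits via fxrstor would otherwise fault in the kernel.
 */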

/*
 * From cpu heavy restore (already in a critical section, gd_npxthread
 * is NULL), and TDF_USINGFP is already set.  Actively restore the FPU
 * state to avoid excessive npxdna traps.
 */
void
npxdna_quick(thread_t newtd)
{
	stop_emulating();
	mdcpu->gd_npxthread = newtd;
	if ((newtd->td_savefpu->sv_xmm.sv_env.en_mxcsr & ~npx_mxcsr_mask) &&
	    cpu_fxsr) {
		krateprintf(&badfprate,
			    "%s: FXRSTR: illegal FP MXCSR %08x\n",
			    newtd->td_comm,
			    newtd->td_savefpu->sv_xmm.sv_env.en_mxcsr);
		newtd->td_savefpu->sv_xmm.sv_env.en_mxcsr &= npx_mxcsr_mask;
		lwpsignal(newtd->td_proc, newtd->td_lwp, SIGFPE);
	}
	fpurstor(newtd->td_savefpu);

	/*
	 * If npx_fpu_heuristic is larger than 1 we reset the heuristic
	 * after N switches and shift to probe mode.  Any npxdna trap will
	 * retrigger active fpu state loading, then probe again after N
	 * switches.
	 *
	 * If npx_fpu_heuristic is 1, active mode is simply left on forever.
	 */
	if (npx_fpu_heuristic > 1 && --newtd->td_fpu_heur <= 0) {
		newtd->td_fpu_heur = npx_fpu_heuristic - 1;
		newtd->td_flags &= ~TDF_FPU_HEUR;
	}
}

/*
 * Wrapper for the fnsave instruction to handle h/w bugs.  If there is
 * an error pending, then fnsave generates a bogus IRQ13 on some systems.
 * Force any IRQ13 to be handled immediately, and then ignore it.  This
 * routine is often called at splhigh so it must not use many system
 * services.  In particular, it's much easier to install a special
 * handler than to guarantee that it's safe to use npxintr() and its
 * supporting code.
 *
 * WARNING! This call is made during a switch and the MP lock will be
 * set up for the new target thread rather than the current thread, so
 * we cannot do anything here that depends on the *_mplock() functions
 * as we may trip over their assertions.
 *
 * WARNING! When using fxsave we MUST fninit after saving the FP state.
 * The kernel will always assume that the FP state is 'safe' (will not
 * cause exceptions) for mmx/xmm use if npxthread is NULL.  The kernel
 * must still set up a custom save area before actually using the FP
 * unit, but it will not bother calling fninit.  This greatly improves
 * kernel performance when it wishes to use the FP unit.
 */
void
npxsave(union savefpu *addr)
{
	struct mdglobaldata *md;

	md = mdcpu;
	crit_enter();
	stop_emulating();
	fpusave(addr);
	md->gd_npxthread = NULL;
	fninit();
	fpurstor(&md->gd_zerofpu);	/* security wipe */
	start_emulating();
	crit_exit();
}

static void
fpusave(union savefpu *addr)
{
#ifndef CPU_DISABLE_AVX
	if (cpu_xsave)
		xsave(CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM,
		      0, addr);
	else
#endif
	if (cpu_fxsr)
		fxsave(addr);
	else
		fnsave(addr);
}

/*
 * Save the FP state to the mcontext structure.
 *
 * WARNING: If you want to try to npxsave() directly to mctx->mc_fpregs,
 * then it MUST be 16-byte aligned.  Currently this is not guaranteed.
 */
void
npxpush(mcontext_t *mctx)
{
	thread_t td = curthread;

	KKASSERT((td->td_flags & TDF_KERNELFP) == 0);

	if (td->td_flags & TDF_USINGFP) {
		if (mdcpu->gd_npxthread == td) {
			/*
			 * XXX Note: This is a bit inefficient if the signal
			 * handler uses floating point; extra faults will
			 * occur.
			 */
			mctx->mc_ownedfp = _MC_FPOWNED_FPU;
			npxsave(td->td_savefpu);
		} else {
			mctx->mc_ownedfp = _MC_FPOWNED_PCB;
		}
		KKASSERT(sizeof(*td->td_savefpu) <= sizeof(mctx->mc_fpregs));
		bcopy(td->td_savefpu, mctx->mc_fpregs,
		      sizeof(*td->td_savefpu));
		td->td_flags &= ~TDF_USINGFP;
#ifndef CPU_DISABLE_AVX
		if (cpu_xsave)
			mctx->mc_fpformat = _MC_FPFMT_YMM;
		else
#endif
		{
			if (cpu_fxsr)
				mctx->mc_fpformat = _MC_FPFMT_XMM;
			else
				mctx->mc_fpformat = _MC_FPFMT_387;
		}
	} else {
		mctx->mc_ownedfp = _MC_FPOWNED_NONE;
		mctx->mc_fpformat = _MC_FPFMT_NODEV;
	}
}
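
#if 0
/*
 * Illustrative sketch (added, not part of this file, and compiled out):
 * how a userland SA_SIGINFO signal handler could consume the state that
 * npxpush() above stores into the mcontext.  The handler name is
 * hypothetical; the mc_ownedfp/mc_fpformat/mc_fpregs fields are the
 * same ones npxpush() fills in.
 */
#include <signal.h>
#include <ucontext.h>

static void
fpe_handler(int sig, siginfo_t *si, void *arg)
{
	ucontext_t *uc = arg;
	mcontext_t *mc = &uc->uc_mcontext;

	if (mc->mc_ownedfp != _MC_FPOWNED_NONE &&
	    mc->mc_fpformat == _MC_FPFMT_XMM) {
		/* mc_fpregs holds an fxsave image of the faulting state */
	}
}
#endif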

/*
 * Restore the FP state from the mcontext structure.
 */
void
npxpop(mcontext_t *mctx)
{
	thread_t td = curthread;

	switch(mctx->mc_ownedfp) {
	case _MC_FPOWNED_NONE:
		/*
		 * If the signal handler used the FP unit but the interrupted
		 * code did not, release the FP unit.  Clearing TDF_USINGFP
		 * will force the FP unit to reinit so the interrupted code
		 * sees a clean slate.
		 */
		if (td->td_flags & TDF_USINGFP) {
			if (td == mdcpu->gd_npxthread)
				npxsave(td->td_savefpu);
			td->td_flags &= ~TDF_USINGFP;
		}
		break;
	case _MC_FPOWNED_FPU:
	case _MC_FPOWNED_PCB:
		/*
		 * Clear ownership of the FP unit and restore our saved state.
		 *
		 * NOTE: The signal handler may have set up some FP state and
		 * enabled the FP unit, so we have to restore no matter what.
		 *
		 * XXX: This is a bit inefficient; if the code being returned
		 * to is actively using the FP this results in multiple
		 * kernel faults.
		 *
		 * WARNING: The saved state was exposed to userland and may
		 * have to be sanitized to avoid a GP fault in the kernel.
		 */
		if (td == mdcpu->gd_npxthread)
			npxsave(td->td_savefpu);
		KKASSERT(sizeof(*td->td_savefpu) <= sizeof(mctx->mc_fpregs));
		bcopy(mctx->mc_fpregs, td->td_savefpu,
		      sizeof(*td->td_savefpu));
		if ((td->td_savefpu->sv_xmm.sv_env.en_mxcsr &
		     ~npx_mxcsr_mask) && cpu_fxsr) {
			krateprintf(&badfprate,
				    "pid %d (%s) signal return from user: "
				    "illegal FP MXCSR %08x\n",
				    td->td_proc->p_pid,
				    td->td_proc->p_comm,
				    td->td_savefpu->sv_xmm.sv_env.en_mxcsr);
		}
		td->td_flags |= TDF_USINGFP;
		break;
	}
}

/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static double dummy_variable = 0.0;

static void
fpu_clean_state(void)
{
	u_short status;

	/*
	 * Clear the ES bit in the x87 status word if it is currently
	 * set, in order to avoid causing a fault in the upcoming load.
	 */
	fnstsw(&status);
	if (status & 0x80)
		fnclex();

	/*
	 * Load the dummy variable into the x87 stack.  This mangles
	 * the x87 stack, but we don't care since we're about to call
	 * fxrstor() anyway.
	 */
	__asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
}

static void
fpurstor(union savefpu *addr)
{
#ifndef CPU_DISABLE_AVX
	if (cpu_xsave)
		xrstor(CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM,
		       0, addr);
	else
#endif
	if (cpu_fxsr) {
		fpu_clean_state();
		fxrstor(addr);
	} else {
		frstor(addr);
	}
}