/*
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * Copyright (c) 2006 The DragonFly Project.
 * Copyright (c) 2006 Matthew Dillon.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)npx.c	7.2 (Berkeley) 5/12/91
 * $FreeBSD: src/sys/i386/isa/npx.c,v 1.80.2.3 2001/10/20 19:04:38 tegge Exp $
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/signalvar.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/globaldata.h>

#define	fldcw(addr)	__asm("fldcw %0" : : "m" (*(addr)))
#define	fnclex()	__asm("fnclex")
#define	fninit()	__asm("fninit")
#define	fnop()		__asm("fnop")
#define	fnsave(addr)	__asm __volatile("fnsave %0" : "=m" (*(addr)))
#define	fnstcw(addr)	__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define	fnstsw(addr)	__asm __volatile("fnstsw %0" : "=m" (*(addr)))
#define	frstor(addr)	__asm("frstor %0" : : "m" (*(addr)))
#define	fxrstor(addr)	__asm("fxrstor %0" : : "m" (*(addr)))
#define	fxsave(addr)	__asm __volatile("fxsave %0" : "=m" (*(addr)))
#ifndef CPU_DISABLE_AVX
#define	xrstor(eax,edx,addr)	__asm __volatile(".byte 0x0f,0xae,0x2f" : : "D" (addr), "a" (eax), "d" (edx))
#define	xsave(eax,edx,addr)	__asm __volatile(".byte 0x0f,0xae,0x27" : : "D" (addr), "a" (eax), "d" (edx) : "memory")
#endif
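
/*
 * NOTE: the .byte sequences above appear to be hand-assembled forms of
 * xrstor (0x0f 0xae /5) and xsave (0x0f 0xae /4) with a (%edi) memory
 * operand, presumably for assemblers that predate XSAVE support.  The
 * "D" constraint pins the save area in %edi, and the requested-feature
 * bitmap is passed in %edx:%eax, so the expansions are equivalent to:
 *
 *	xrstor	(%edi)		and	xsave	(%edi)
 */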

#define	start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
				      : : "n" (CR0_TS) : "ax")
#define	stop_emulating()	__asm("clts")

typedef u_char bool_t;
static	void	fpu_clean_state(void);
#define	ldmxcsr(csr)	__asm __volatile("ldmxcsr %0" : : "m" (csr))

static struct krate badfprate = { 1 };

static	void	fpusave		(union savefpu *);
static	void	fpurstor	(union savefpu *);

__read_mostly uint32_t npx_mxcsr_mask = 0xFFBF;	/* this is the default */

/*
 * Probe the npx_mxcsr_mask as described in the Intel document
 * "Intel processor identification and the CPUID instruction", Section 7,
 * "Denormals are Zero".
 * Note that for fxsave to work reliably, the OS support bit for
 * FXSAVE/FXRSTOR operations in CR4 has to be set as per
 * Intel 64 and IA-32 Architectures Developer's Manual: Vol. 1,
 * 10.5.1.2.
 */
void
npxprobemask(void)
{
	/* 64-byte alignment required for xsave */
	static union savefpu dummy __aligned(64);

	crit_enter();
	stop_emulating();
	load_cr4(rcr4() | CR4_FXSR);
	fxsave(&dummy);
	npx_mxcsr_mask = ((uint32_t *)&dummy)[7];
	start_emulating();
	crit_exit();
}
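
/*
 * NOTE (illustrative, per the Intel application note cited above): the
 * [7] index reads the 32-bit word at byte offset 28 of the 512-byte
 * fxsave image, which is the MXCSR_MASK field.  The document also states
 * that a saved mask of 0 means the default mask (0xFFBF) applies; a
 * sketch of the probe with that fallback made explicit would be:
 *
 *	uint32_t mask = ((uint32_t *)&dummy)[7];
 *	npx_mxcsr_mask = (mask != 0) ? mask : 0xFFBF;
 */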

/*
 * Initialize the floating point unit.
 */
void
npxinit(void)
{
	/* 64-byte alignment required for xsave */
	static union savefpu dummy __aligned(64);
	u_short control = __INITIAL_FPUCW__;
	u_int mxcsr = __INITIAL_MXCSR__;

	/*
	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
	 * fnsave to throw away any junk in the fpu.  npxsave() initializes
	 * the fpu and sets npxthread = NULL as important side effects.
	 */
	npxsave(&dummy);
	crit_enter();
	stop_emulating();
	fldcw(&control);
	ldmxcsr(mxcsr);
	fpusave(curthread->td_savefpu);
	mdcpu->gd_npxthread = NULL;
	start_emulating();
	crit_exit();
}

/*
 * Free coprocessor (if we have it).
 */
void
npxexit(void)
{
	if (curthread == mdcpu->gd_npxthread)
		npxsave(curthread->td_savefpu);
}

#if	0
/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 127 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 *   1  Invalid operation (FP_X_INV)
 *   1a   Stack underflow
 *   1b   Stack overflow
 *   1c   Operand of unsupported format
 *   1d   SNaN operand.
 *   2  QNaN operand (not an exception, irrelevant here)
 *   3  Any other invalid-operation not mentioned above or zero divide
 *        (FP_X_INV, FP_X_DZ)
 *   4  Denormal operand (FP_X_DNML)
 *   5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 *   6  Inexact result (FP_X_IMP)
 */
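
/*
 * One plausible form of the selection macro described above, assuming
 * 'status' and 'control' hold the FPU status and control words (the
 * original macro is not part of this excerpt; this sketch follows the
 * historic FreeBSD code):
 *
 *	code = fpetable[status & ((~control & 0x3f) | 0x40)];
 *
 * (~control & 0x3f) drops the exception bits currently masked by the
 * control word, and 0x40 unconditionally keeps the stack-fault bit,
 * yielding the 7-bit index into the table below.
 */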

static char fpetable[128] = {
	0,
	FPE_FLTINV,	/*  1 - INV */
	FPE_FLTUND,	/*  2 - DNML */
	FPE_FLTINV,	/*  3 - INV | DNML */
	FPE_FLTDIV,	/*  4 - DZ */
	FPE_FLTINV,	/*  5 - INV | DZ */
	FPE_FLTDIV,	/*  6 - DNML | DZ */
	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
	FPE_FLTOVF,	/*  8 - OFL */
	FPE_FLTINV,	/*  9 - INV | OFL */
	FPE_FLTUND,	/*  A - DNML | OFL */
	FPE_FLTINV,	/*  B - INV | DNML | OFL */
	FPE_FLTDIV,	/*  C - DZ | OFL */
	FPE_FLTINV,	/*  D - INV | DZ | OFL */
	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};

#endif

/*
 * Implement the device-not-available (DNA) exception.  gd_npxthread had
 * better be NULL.  Restore the current thread's FP state and set
 * gd_npxthread to curthread.
 *
 * Interrupts are enabled and preemption can occur.  Enter a critical
 * section to stabilize the FP state.
 */
int
npxdna(void)
{
	struct mdglobaldata *md = mdcpu;
	thread_t td;
	int didinit = 0;

	td = md->mi.gd_curthread;

	/*
	 * npxthread is almost always NULL.  When it isn't NULL it can
	 * only be exactly equal to 'td'.  This case occurs when the switch
	 * code pro-actively restores the FPU state due to the trap() code
	 * being interruptible (e.g. such as by an interrupt thread).
	 */
	if (__predict_false(md->gd_npxthread != NULL)) {
		if (md->gd_npxthread == td) {
			return 1;
		}
		kprintf("npxdna: npxthread = %p, curthread = %p\n",
			md->gd_npxthread, td);
		panic("npxdna");
	}

	/*
	 * Set up the initial saved state if the thread has never before
	 * used the FP unit.  This also occurs when a thread pushes a
	 * signal handler and uses FP in the handler.
	 */
	crit_enter();
	if ((td->td_flags & (TDF_USINGFP | TDF_KERNELFP)) == 0) {
		td->td_flags |= TDF_USINGFP;
		npxinit();
		didinit = 1;
	}

	/*
	 * The setting of gd_npxthread and the call to fpurstor() must not
	 * be preempted by an interrupt thread or we will take an npxdna
	 * trap and potentially save our current fpstate (which is garbage)
	 * and then restore the garbage rather than the originally saved
	 * fpstate.
	 */
	stop_emulating();

	/*
	 * Record new context early in case frstor causes an IRQ13.
	 */
	md->gd_npxthread = td;

	/*
	 * The following frstor may cause an IRQ13 when the state being
	 * restored has a pending error.  The error will appear to have been
	 * triggered by the current (npx) user instruction even when that
	 * instruction is a no-wait instruction that should not trigger an
	 * error (e.g., fnclex).  On at least one 486 system all of the
	 * no-wait instructions are broken the same as frstor, so our
	 * treatment does not amplify the breakage.  On at least one
	 * 386/Cyrix 387 system, fnclex works correctly while frstor and
	 * fnsave are broken, so our treatment breaks fnclex if it is the
	 * first FPU instruction after a context switch.
	 */
	if ((td->td_savefpu->sv_xmm.sv_env.en_mxcsr & ~npx_mxcsr_mask) &&
	    cpu_fxsr) {
		krateprintf(&badfprate,
			    "%s: FXRSTR: illegal FP MXCSR %08x didinit = %d\n",
			    td->td_comm, td->td_savefpu->sv_xmm.sv_env.en_mxcsr,
			    didinit);
		td->td_savefpu->sv_xmm.sv_env.en_mxcsr &= npx_mxcsr_mask;
		lwpsignal(td->td_proc, td->td_lwp, SIGFPE);
	}
	fpurstor(td->td_savefpu);
	crit_exit();

	return (1);
}
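
/*
 * Illustrative summary of the lazy-FPU flow implemented above (the trap
 * plumbing itself lives outside this file): while CR0_TS is set
 * (start_emulating), the first FP/MMX/SSE instruction a thread issues
 * raises #NM (device-not-available); the trap handler calls npxdna(),
 * which clears TS (stop_emulating/clts), restores td_savefpu, and lets
 * the faulting instruction restart.  Subsequent FP instructions then
 * run at full speed until the next context switch saves the state again.
 */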

/*
 * From cpu heavy restore (already in critical section, gd_npxthread is NULL),
 * and TDF_USINGFP is already set.  Actively restore the FPU state to avoid
 * excessive npxdna traps.
 */
void
npxdna_quick(thread_t newtd)
{
	stop_emulating();
	mdcpu->gd_npxthread = newtd;
	if ((newtd->td_savefpu->sv_xmm.sv_env.en_mxcsr & ~npx_mxcsr_mask) &&
	    cpu_fxsr) {
		krateprintf(&badfprate,
			    "%s: FXRSTR: illegal FP MXCSR %08x\n",
			    newtd->td_comm,
			    newtd->td_savefpu->sv_xmm.sv_env.en_mxcsr);
		newtd->td_savefpu->sv_xmm.sv_env.en_mxcsr &= npx_mxcsr_mask;
		lwpsignal(newtd->td_proc, newtd->td_lwp, SIGFPE);
	}
	fpurstor(newtd->td_savefpu);
}

/*
 * Wrapper for the fnsave instruction to handle h/w bugs.  If there is an
 * error pending, then fnsave generates a bogus IRQ13 on some systems.  Force
 * any IRQ13 to be handled immediately, and then ignore it.  This routine is
 * often called at splhigh so it must not use many system services.  In
 * particular, it's much easier to install a special handler than to
 * guarantee that it's safe to use npxintr() and its supporting code.
 *
 * WARNING!  This call is made during a switch and the MP lock will be
 * set up for the new target thread rather than the current thread, so we
 * cannot do anything here that depends on the *_mplock() functions as
 * we may trip over their assertions.
 *
 * WARNING!  When using fxsave we MUST fninit after saving the FP state.  The
 * kernel will always assume that the FP state is 'safe' (will not cause
 * exceptions) for mmx/xmm use if npxthread is NULL.  The kernel must still
 * set up a custom save area before actually using the FP unit, but it will
 * not bother calling fninit.  This greatly improves kernel performance when
 * it wishes to use the FP unit.
 */
void
npxsave(union savefpu *addr)
{
	struct mdglobaldata *md;

	md = mdcpu;
	crit_enter();
	stop_emulating();
	fpusave(addr);
	md->gd_npxthread = NULL;
	fninit();
	fpurstor(&md->gd_zerofpu);	/* security wipe */
	start_emulating();
	crit_exit();
}

static void
fpusave(union savefpu *addr)
{
#ifndef CPU_DISABLE_AVX
	if (cpu_xsave)
		xsave(CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM,
		      0, addr);
	else
#endif
	if (cpu_fxsr)
		fxsave(addr);
	else
		fnsave(addr);
}
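
/*
 * NOTE on the xsave/xrstor calls above and in fpurstor(): the first two
 * macro arguments land in %eax and %edx, which the hardware treats as
 * the low/high halves of the 64-bit requested-feature bitmap (ANDed
 * with XCR0).  Assuming the usual XCR0 bit assignments (x87 = bit 0,
 * SSE = bit 1, AVX = bit 2), CPU_XFEATURE_X87 | CPU_XFEATURE_SSE |
 * CPU_XFEATURE_YMM requests the x87, SSE, and AVX state components,
 * and the high word is simply 0.
 */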

/*
 * Save the FP state to the mcontext structure.
 *
 * WARNING: If you want to try to npxsave() directly to mctx->mc_fpregs,
 * then it MUST be 16-byte aligned.  Currently this is not guaranteed.
 */
void
npxpush(mcontext_t *mctx)
{
	thread_t td = curthread;

	KKASSERT((td->td_flags & TDF_KERNELFP) == 0);

	if (td->td_flags & TDF_USINGFP) {
		if (mdcpu->gd_npxthread == td) {
			/*
			 * XXX Note: This is a bit inefficient; if the signal
			 * handler uses floating point, extra faults will
			 * occur.
			 */
			mctx->mc_ownedfp = _MC_FPOWNED_FPU;
			npxsave(td->td_savefpu);
		} else {
			mctx->mc_ownedfp = _MC_FPOWNED_PCB;
		}
		KKASSERT(sizeof(*td->td_savefpu) <= sizeof(mctx->mc_fpregs));
		bcopy(td->td_savefpu, mctx->mc_fpregs,
		      sizeof(*td->td_savefpu));
		td->td_flags &= ~TDF_USINGFP;
#ifndef CPU_DISABLE_AVX
		if (cpu_xsave)
			mctx->mc_fpformat = _MC_FPFMT_YMM;
		else
#endif
		{
			if (cpu_fxsr)
				mctx->mc_fpformat = _MC_FPFMT_XMM;
			else
				mctx->mc_fpformat = _MC_FPFMT_387;
		}
	} else {
		mctx->mc_ownedfp = _MC_FPOWNED_NONE;
		mctx->mc_fpformat = _MC_FPFMT_NODEV;
	}
}
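
/*
 * Illustrative pairing (the call sites are in the machine-dependent
 * signal code, not in this file, so treat this as an assumption): the
 * sendsig() path would use npxpush() to snapshot FP state into the
 * signal frame's mcontext, and the sigreturn path would hand that
 * mcontext back to npxpop() below, which re-validates it before
 * adopting it.
 */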
561 * 562 * WARNING: The saved state was exposed to userland and may 563 * have to be sanitized to avoid a GP fault in the kernel. 564 */ 565 if (td == mdcpu->gd_npxthread) 566 npxsave(td->td_savefpu); 567 KKASSERT(sizeof(*td->td_savefpu) <= sizeof(mctx->mc_fpregs)); 568 bcopy(mctx->mc_fpregs, td->td_savefpu, sizeof(*td->td_savefpu)); 569 if ((td->td_savefpu->sv_xmm.sv_env.en_mxcsr & ~npx_mxcsr_mask) && 570 cpu_fxsr) { 571 krateprintf(&badfprate, 572 "pid %d (%s) signal return from user: " 573 "illegal FP MXCSR %08x\n", 574 td->td_proc->p_pid, 575 td->td_proc->p_comm, 576 td->td_savefpu->sv_xmm.sv_env.en_mxcsr); 577 } 578 td->td_flags |= TDF_USINGFP; 579 break; 580 } 581 } 582 583 584 /* 585 * On AuthenticAMD processors, the fxrstor instruction does not restore 586 * the x87's stored last instruction pointer, last data pointer, and last 587 * opcode values, except in the rare case in which the exception summary 588 * (ES) bit in the x87 status word is set to 1. 589 * 590 * In order to avoid leaking this information across processes, we clean 591 * these values by performing a dummy load before executing fxrstor(). 592 */ 593 static double dummy_variable = 0.0; 594 static void 595 fpu_clean_state(void) 596 { 597 u_short status; 598 599 /* 600 * Clear the ES bit in the x87 status word if it is currently 601 * set, in order to avoid causing a fault in the upcoming load. 602 */ 603 fnstsw(&status); 604 if (status & 0x80) 605 fnclex(); 606 607 /* 608 * Load the dummy variable into the x87 stack. This mangles 609 * the x87 stack, but we don't care since we're about to call 610 * fxrstor() anyway. 611 */ 612 __asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable)); 613 } 614 615 static void 616 fpurstor(union savefpu *addr) 617 { 618 #ifndef CPU_DISABLE_AVX 619 if (cpu_xsave) 620 xrstor(CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM, 0, addr); 621 else 622 #endif 623 if (cpu_fxsr) { 624 fpu_clean_state(); 625 fxrstor(addr); 626 } else { 627 frstor(addr); 628 } 629 } 630 631