/*
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * Copyright (c) 2006 The DragonFly Project.
 * Copyright (c) 2006 Matthew Dillon.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)npx.c	7.2 (Berkeley) 5/12/91
 * $FreeBSD: src/sys/i386/isa/npx.c,v 1.80.2.3 2001/10/20 19:04:38 tegge Exp $
 */

#include "opt_debug_npx.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/rman.h>
#ifdef NPX_DEBUG
#include <sys/syslog.h>
#endif
#include <sys/signalvar.h>
#include <sys/thread2.h>

#ifndef SMP
#include <machine/asmacros.h>
#endif
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#ifndef SMP
#include <machine/clock.h>
#endif
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/globaldata.h>

#define	fldcw(addr)		__asm("fldcw %0" : : "m" (*(addr)))
#define	fnclex()		__asm("fnclex")
#define	fninit()		__asm("fninit")
#define	fnop()			__asm("fnop")
#define	fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
#define	fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define	fnstsw(addr)		__asm __volatile("fnstsw %0" : "=m" (*(addr)))
#define	frstor(addr)		__asm("frstor %0" : : "m" (*(addr)))
#ifndef CPU_DISABLE_SSE
#define	fxrstor(addr)		__asm("fxrstor %0" : : "m" (*(addr)))
#define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
#endif
#define	start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
				      : : "n" (CR0_TS) : "ax")
#define	stop_emulating()	__asm("clts")
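
/*
 * Illustrative sketch (not compiled, function name hypothetical): how the
 * control-word macros above are typically paired.  Read the current FPU
 * control word with fnstcw, set the bits of interest, and load it back
 * with fldcw.  The low six control-word bits are the x87 exception mask
 * bits; setting them all masks (suppresses) every maskable exception.
 */
#if 0
static void
npx_mask_all_exceptions(void)
{
	u_short cw;

	fnstcw(&cw);		/* fetch the current control word */
	cw |= 0x003f;		/* set all six exception mask bits */
	fldcw(&cw);		/* load the modified control word */
}
#endif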
#ifndef CPU_DISABLE_SSE
#define	GET_FPU_EXSW_PTR(td) \
	(cpu_fxsr ? \
	    &(td)->td_savefpu->sv_xmm.sv_ex_sw : \
	    &(td)->td_savefpu->sv_87.sv_ex_sw)
#else	/* CPU_DISABLE_SSE */
#define	GET_FPU_EXSW_PTR(td) \
	(&(td)->td_savefpu->sv_87.sv_ex_sw)
#endif	/* CPU_DISABLE_SSE */

typedef u_char bool_t;
#ifndef CPU_DISABLE_SSE
static void	fpu_clean_state(void);
#endif

static struct krate badfprate = { 1 };

static void fpusave (union savefpu *);
static void fpurstor (union savefpu *);

/*
 * Initialize the floating point unit.
 */
void
npxinit(u_short control)
{
	static union savefpu dummy __aligned(16);

	/*
	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
	 * fnsave to throw away any junk in the fpu.  npxsave() initializes
	 * the fpu and sets npxthread = NULL as important side effects.
	 */
	npxsave(&dummy);
	crit_enter();
	stop_emulating();
	fldcw(&control);
	fpusave(curthread->td_savefpu);
	mdcpu->gd_npxthread = NULL;
	start_emulating();
	crit_exit();
}

/*
 * Free coprocessor (if we have it).
 */
void
npxexit(void)
{
	if (curthread == mdcpu->gd_npxthread)
		npxsave(curthread->td_savefpu);
}

#if	0
/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 127 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.  (A
 * sketch of this lookup follows the table below.)
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 *  1  Invalid operation (FP_X_INV)
 *  1a   Stack underflow
 *  1b   Stack overflow
 *  1c   Operand of unsupported format
 *  1d   SNaN operand.
 *  2  QNaN operand (not an exception, irrelevant here)
 *  3  Any other invalid-operation not mentioned above or zero divide
 *       (FP_X_INV, FP_X_DZ)
 *  4  Denormal operand (FP_X_DNML)
 *  5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 *  6  Inexact result (FP_X_IMP)
 */
static char fpetable[128] = {
	0,
	FPE_FLTINV,	/*  1 - INV */
	FPE_FLTUND,	/*  2 - DNML */
	FPE_FLTINV,	/*  3 - INV | DNML */
	FPE_FLTDIV,	/*  4 - DZ */
	FPE_FLTINV,	/*  5 - INV | DZ */
	FPE_FLTDIV,	/*  6 - DNML | DZ */
	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
	FPE_FLTOVF,	/*  8 - OFL */
	FPE_FLTINV,	/*  9 - INV | OFL */
	FPE_FLTUND,	/*  A - DNML | OFL */
	FPE_FLTINV,	/*  B - INV | DNML | OFL */
	FPE_FLTDIV,	/*  C - DZ | OFL */
	FPE_FLTINV,	/*  D - INV | DZ | OFL */
	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};

#endif
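
/*
 * Illustrative sketch (not compiled, function name hypothetical): how the
 * table above is meant to be indexed, following the steps described in the
 * comment preceding fpetable.  Masked exceptions are discarded and the
 * unmaskable stack-fault bit (0x40) is reinserted before the lookup; the
 * same expression appears in npx_intr() below.
 */
#if 0
static int
npx_fpe_code(void)
{
	u_short control, status;

	fnstsw(&status);
	fnstcw(&control);
	/* drop masked exceptions, then OR the stack-fault bit back in */
	return (fpetable[(status & ~control & 0x3f) | (status & 0x40)]);
}
#endif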
#if 0

/*
 * Preserve the FP status word, clear FP exceptions, then generate a SIGFPE.
 *
 * Clearing exceptions is necessary mainly to avoid IRQ13 bugs.  We now
 * depend on longjmp() restoring a usable state.  Restoring the state
 * or examining it might fail if we didn't clear exceptions.
 *
 * The error code chosen will be one of the FPE_... macros.  It will be
 * sent as the second argument to old BSD-style signal handlers and as
 * "siginfo_t->si_code" (second argument) to SA_SIGINFO signal handlers.
 *
 * XXX the FP state is not preserved across signal handlers.  So signal
 * handlers cannot afford to do FP unless they preserve the state or
 * longjmp() out.  Both preserving the state and longjmp()ing may be
 * destroyed by IRQ13 bugs.  Clearing FP exceptions is not an acceptable
 * solution for signals other than SIGFPE.
 *
 * The MP lock is not held on entry (see i386/i386/exception.s) and
 * should not be held on exit.  Interrupts are enabled.  We must enter
 * a critical section to stabilize the FP system and prevent an interrupt
 * or preemption from changing the FP state out from under us.
 */
void
npx_intr(void *dummy)
{
	int code;
	u_short control;
	struct intrframe *frame;
	u_long *exstat;

	crit_enter();

	/*
	 * This exception can only occur with CR0_TS clear, otherwise we
	 * would get a DNA exception.  However, since interrupts were
	 * enabled a preemption could have sneaked in and used the FP system
	 * before we entered our critical section.  If that occurred, the
	 * TS bit will be set and npxthread will be NULL.
	 */
	panic("npx_intr: not coded");
	/* XXX FP STATE FLAG MUST BE PART OF CONTEXT SUPPLIED BY REAL KERNEL */
#if 0
	if (rcr0() & CR0_TS) {
		KASSERT(mdcpu->gd_npxthread == NULL,
		    ("gd_npxthread was %p with TS set!", mdcpu->gd_npxthread));
		npxdna();
		crit_exit();
		return;
	}
#endif
	if (mdcpu->gd_npxthread == NULL) {
		get_mplock();
		kprintf("npxintr: npxthread = %p, curthread = %p\n",
		       mdcpu->gd_npxthread, curthread);
		panic("npxintr from nowhere");
	}
	if (mdcpu->gd_npxthread != curthread) {
		get_mplock();
		kprintf("npxintr: npxthread = %p, curthread = %p\n",
		       mdcpu->gd_npxthread, curthread);
		panic("npxintr from non-current process");
	}

	exstat = GET_FPU_EXSW_PTR(curthread);
	outb(0xf0, 0);
	fnstsw(exstat);
	fnstcw(&control);
	fnclex();

	get_mplock();

	/*
	 * Pass exception to process.
	 */
	frame = (struct intrframe *)&dummy;	/* XXX */
	if ((ISPL(frame->if_cs) == SEL_UPL) /*||(frame->if_eflags&PSL_VM)*/) {
		/*
		 * Interrupt is essentially a trap, so we can afford to call
		 * the SIGFPE handler (if any) as soon as the interrupt
		 * returns.
		 *
		 * XXX little or nothing is gained from this, and plenty is
		 * lost - the interrupt frame has to contain the trap frame
		 * (this is otherwise only necessary for the rescheduling trap
		 * in doreti, and the frame for that could easily be set up
		 * just before it is used).
		 */
		curthread->td_lwp->lwp_md.md_regs = INTR_TO_TRAPFRAME(frame);
		/*
		 * Encode the appropriate code for detailed information on
		 * this exception.
		 */
		code =
		    fpetable[(*exstat & ~control & 0x3f) | (*exstat & 0x40)];
		trapsignal(curthread->td_lwp, SIGFPE, code);
	} else {
		/*
		 * Nested interrupt.  These losers occur when:
		 *	o an IRQ13 is bogusly generated at a bogus time, e.g.:
		 *		o immediately after an fnsave or frstor of an
		 *		  error state.
		 *		o a couple of 386 instructions after
		 *		  "fstpl _memvar" causes a stack overflow.
		 *	  These are especially nasty when combined with a
		 *	  trace trap.
		 *	o an IRQ13 occurs at the same time as another higher-
		 *	  priority interrupt.
		 *
		 * Treat them like a true async interrupt.
		 */
		lwpsignal(curproc, curthread->td_lwp, SIGFPE);
	}
	rel_mplock();
	crit_exit();
}

#endif
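
/*
 * Illustrative sketch (not compiled, context hypothetical): the path that
 * leads into npxdna() below.  With CR0_TS set, the first FP instruction a
 * thread executes raises a device-not-available fault; the machine-
 * dependent trap code dispatches it to npxdna(), which takes ownership of
 * the FP unit, restores the thread's saved state, and lets the faulting
 * instruction be retried.
 */
#if 0
	case T_DNA:		/* device not available fault */
		if (npxdna())	/* FP state restored, retry instruction */
			goto out;
		break;
#endif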
/*
 * Implement the device not available (DNA) exception.  gd_npxthread had
 * better be NULL.  Restore the current thread's FP state and set
 * gd_npxthread to curthread.
 *
 * Interrupts are enabled and preemption can occur.  Enter a critical
 * section to stabilize the FP state.
 */
int
npxdna(void)
{
	thread_t td = curthread;
	u_long *exstat;
	int didinit = 0;

	if (mdcpu->gd_npxthread != NULL) {
		kprintf("npxdna: npxthread = %p, curthread = %p\n",
		       mdcpu->gd_npxthread, curthread);
		panic("npxdna");
	}

	/*
	 * Set up the initial saved state if the thread has never before
	 * used the FP unit.  This also occurs when a thread pushes a
	 * signal handler and uses FP in the handler.
	 */
	if ((td->td_flags & (TDF_USINGFP | TDF_KERNELFP)) == 0) {
		td->td_flags |= TDF_USINGFP;
		npxinit(__INITIAL_NPXCW__);
		didinit = 1;
	}

	/*
	 * The setting of gd_npxthread and the call to fpurstor() must not
	 * be preempted by an interrupt thread or we will take an npxdna
	 * trap and potentially save our current fpstate (which is garbage)
	 * and then restore the garbage rather than the originally saved
	 * fpstate.
	 */
	crit_enter();
	stop_emulating();

	/*
	 * Record new context early in case frstor causes an IRQ13.
	 */
	mdcpu->gd_npxthread = td;
	exstat = GET_FPU_EXSW_PTR(td);
	*exstat = 0;

	/*
	 * The following frstor may cause an IRQ13 when the state being
	 * restored has a pending error.  The error will appear to have been
	 * triggered by the current (npx) user instruction even when that
	 * instruction is a no-wait instruction that should not trigger an
	 * error (e.g., fnclex).  On at least one 486 system all of the
	 * no-wait instructions are broken the same as frstor, so our
	 * treatment does not amplify the breakage.  On at least one
	 * 386/Cyrix 387 system, fnclex works correctly while frstor and
	 * fnsave are broken, so our treatment breaks fnclex if it is the
	 * first FPU instruction after a context switch.
	 */
	if ((td->td_savefpu->sv_xmm.sv_env.en_mxcsr & ~0xFFBF)
#ifndef CPU_DISABLE_SSE
	    && cpu_fxsr
#endif
	) {
		krateprintf(&badfprate,
			    "FXRSTR: illegal FP MXCSR %08x didinit = %d\n",
			    td->td_savefpu->sv_xmm.sv_env.en_mxcsr, didinit);
		td->td_savefpu->sv_xmm.sv_env.en_mxcsr &= 0xFFBF;
		lwpsignal(curproc, curthread->td_lwp, SIGFPE);
	}
	fpurstor(td->td_savefpu);
	crit_exit();

	return (1);
}
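
/*
 * Worked example for the MXCSR sanity test above (an editorial note, not
 * from the original source): the power-on default MXCSR is 0x1F80 (all six
 * exception mask bits set, round-to-nearest), and 0x1F80 & ~0xFFBF == 0,
 * so the default value passes the test.  0xFFBF covers the architecturally
 * defined MXCSR bits other than DAZ (bit 6), which not all CPUs accept;
 * loading a value outside the supported mask would make fxrstor raise a
 * GP fault, hence the sanitization before the restore.
 */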
/*
 * Wrapper for the fnsave instruction to handle h/w bugs.  If there is an
 * error pending, then fnsave generates a bogus IRQ13 on some systems.
 * Force any IRQ13 to be handled immediately, and then ignore it.  This
 * routine is often called at splhigh so it must not use many system
 * services.  In particular, it's much easier to install a special handler
 * than to guarantee that it's safe to use npxintr() and its supporting
 * code.
 *
 * WARNING!  This call is made during a switch and the MP lock will be
 * setup for the new target thread rather than the current thread, so we
 * cannot do anything here that depends on the *_mplock() functions as
 * we may trip over their assertions.
 *
 * WARNING!  When using fxsave we MUST fninit after saving the FP state.
 * The kernel will always assume that the FP state is 'safe' (will not
 * cause exceptions) for mmx/xmm use if npxthread is NULL.  The kernel
 * must still set up a custom save area before actually using the FP unit,
 * but it will not bother calling fninit.  This greatly improves kernel
 * performance when it wishes to use the FP unit.
 */
void
npxsave(union savefpu *addr)
{
	crit_enter();
	stop_emulating();
	fpusave(addr);
	mdcpu->gd_npxthread = NULL;
	fninit();
	start_emulating();
	crit_exit();
}

static void
fpusave(union savefpu *addr)
{
#ifndef CPU_DISABLE_SSE
	if (cpu_fxsr)
		fxsave(addr);
	else
#endif
		fnsave(addr);
}

/*
 * Save the FP state to the mcontext structure.
 *
 * WARNING: If you want to try to npxsave() directly to mctx->mc_fpregs,
 * then it MUST be 16-byte aligned.  Currently this is not guaranteed.
 */
void
npxpush(mcontext_t *mctx)
{
	thread_t td = curthread;

	KKASSERT((td->td_flags & TDF_KERNELFP) == 0);

	if (td->td_flags & TDF_USINGFP) {
		if (mdcpu->gd_npxthread == td) {
			/*
			 * XXX Note: This is a bit inefficient if the signal
			 * handler uses floating point, extra faults will
			 * occur.
			 */
			mctx->mc_ownedfp = _MC_FPOWNED_FPU;
			npxsave(td->td_savefpu);
		} else {
			mctx->mc_ownedfp = _MC_FPOWNED_PCB;
		}
		bcopy(td->td_savefpu, mctx->mc_fpregs,
		      sizeof(mctx->mc_fpregs));
		td->td_flags &= ~TDF_USINGFP;
		mctx->mc_fpformat =
#ifndef CPU_DISABLE_SSE
				    (cpu_fxsr) ? _MC_FPFMT_XMM :
#endif
				    _MC_FPFMT_387;
	} else {
		mctx->mc_ownedfp = _MC_FPOWNED_NONE;
		mctx->mc_fpformat = _MC_FPFMT_NODEV;
	}
}

/*
 * Restore the FP state from the mcontext structure.
 */
void
npxpop(mcontext_t *mctx)
{
	thread_t td = curthread;

	switch(mctx->mc_ownedfp) {
	case _MC_FPOWNED_NONE:
		/*
		 * If the signal handler used the FP unit but the interrupted
		 * code did not, release the FP unit.  Clearing TDF_USINGFP
		 * will force the FP unit to reinit so the interrupted code
		 * sees a clean slate.
		 */
		if (td->td_flags & TDF_USINGFP) {
			if (td == mdcpu->gd_npxthread)
				npxsave(td->td_savefpu);
			td->td_flags &= ~TDF_USINGFP;
		}
		break;
	case _MC_FPOWNED_FPU:
	case _MC_FPOWNED_PCB:
		/*
		 * Clear ownership of the FP unit and restore our saved state.
		 *
		 * NOTE: The signal handler may have set up some FP state and
		 * enabled the FP unit, so we have to restore no matter what.
		 *
		 * XXX: This is a bit inefficient; if the code being returned
		 * to is actively using the FP this results in multiple
		 * kernel faults.
		 *
		 * WARNING: The saved state was exposed to userland and may
		 * have to be sanitized to avoid a GP fault in the kernel.
		 */
		if (td == mdcpu->gd_npxthread)
			npxsave(td->td_savefpu);
		bcopy(mctx->mc_fpregs, td->td_savefpu,
		      sizeof(*td->td_savefpu));
		if ((td->td_savefpu->sv_xmm.sv_env.en_mxcsr & ~0xFFBF)
#ifndef CPU_DISABLE_SSE
		    && cpu_fxsr
#endif
		) {
			krateprintf(&badfprate,
				    "pid %d (%s) signal return from user: "
				    "illegal FP MXCSR %08x\n",
				    td->td_proc->p_pid,
				    td->td_proc->p_comm,
				    td->td_savefpu->sv_xmm.sv_env.en_mxcsr);
		}
		td->td_flags |= TDF_USINGFP;
		break;
	}
}
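
/*
 * Illustrative sketch (not compiled, caller context hypothetical): how
 * npxpush()/npxpop() bracket a signal.  The machine-dependent signal code
 * pushes the FP state into the mcontext before the handler runs and pops
 * it back when the handler returns via sigreturn.
 */
#if 0
	npxpush(&ucp->uc_mcontext);	/* before dispatching the handler */
	/* ... signal handler runs in userland ... */
	npxpop(&ucp->uc_mcontext);	/* on signal return */
#endif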
#ifndef CPU_DISABLE_SSE
/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static double dummy_variable = 0.0;

static void
fpu_clean_state(void)
{
	u_short status;

	/*
	 * Clear the ES bit in the x87 status word if it is currently
	 * set, in order to avoid causing a fault in the upcoming load.
	 */
	fnstsw(&status);
	if (status & 0x80)
		fnclex();

	/*
	 * Load the dummy variable into the x87 stack.  This mangles
	 * the x87 stack, but we don't care since we're about to call
	 * fxrstor() anyway.
	 */
	__asm __volatile("ffree %%st(7); fld %0" : : "m" (dummy_variable));
}
#endif	/* CPU_DISABLE_SSE */

static void
fpurstor(union savefpu *addr)
{
#ifndef CPU_DISABLE_SSE
	if (cpu_fxsr) {
		fpu_clean_state();
		fxrstor(addr);
	} else {
		frstor(addr);
	}
#else
	frstor(addr);
#endif
}
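
/*
 * Illustrative sketch (not compiled, simplified): pairing fpusave() and
 * fpurstor() against a private save area.  As the npxpush() warning notes,
 * fxsave/fxrstor require the area to be 16-byte aligned, hence the
 * explicit __aligned(16).  The real kernel paths additionally manage
 * gd_npxthread and the TDF_USINGFP/TDF_KERNELFP flags, which this sketch
 * omits.
 */
#if 0
	static union savefpu scratch __aligned(16);

	crit_enter();			/* keep the FP state stable */
	fpusave(&scratch);		/* fxsave or fnsave, per cpu_fxsr */
	/* ... use the FP unit here ... */
	fpurstor(&scratch);		/* matching fxrstor or frstor */
	crit_exit();
#endif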