/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1982, 1987, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
 */

#include "opt_ddb.h"
#include "opt_directio.h"
#include "opt_inet.h"
#include "opt_msgbuf.h"
#include "opt_swap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/usched.h>
#include <sys/reg.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <machine/cpu.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/globaldata.h>		/* CPU_prvspace */
#include <machine/smp.h>
#include <machine/cputypes.h>

#include <bus/isa/rtc.h>
#include <sys/random.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>
#include <unistd.h>			/* umtx_* functions */
#include <pthread.h>			/* pthread_yield() */

extern void dblfault_handler (void);

static void set_fpregs_xmm (struct save87 *, struct savexmm *);
static void fill_fpregs_xmm (struct savexmm *, struct save87 *);
#ifdef DIRECTIO
extern void ffs_rawread_setup(void);
#endif /* DIRECTIO */

int64_t tsc_offsets[MAXCPU];

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	u_long pmem = ctob(physmem);

	int error = sysctl_handle_long(oidp, &pmem, 0, req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_ULONG|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "LU",
	"Total system memory in bytes (number of pages * page size)");

static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	/* JG */
	int error = sysctl_handle_int(oidp, 0,
		ctob((int)Maxmem - vmstats.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "IU", "");

SYSCTL_ULONG(_hw, OID_AUTO, availpages, CTLFLAG_RD, &Maxmem, 0, "");
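
/*
 * Illustrative example (not part of the original file): the hw.physmem
 * value exported by the handler above can be read from userland with
 * sysctlbyname(3).  A minimal sketch:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		unsigned long pmem;
 *		size_t len = sizeof(pmem);
 *
 *		if (sysctlbyname("hw.physmem", &pmem, &len, NULL, 0) == 0)
 *			printf("physical memory: %lu bytes\n", pmem);
 *		return (0);
 *	}
 */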

/*
 * Send an interrupt to a process.
 *
 * The stack is set up so that the sigcode stored at its top calls the
 * handler, followed by a call to the sigreturn routine below.  After
 * sigreturn resets the signal mask, the stack, and the frame pointer,
 * it returns to the user-specified pc and psl.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct trapframe *regs;
	struct sigacts *psp = p->p_sigacts;
	struct sigframe sf, *sfp;
	int oonstack;
	char *sp;

	regs = lp->lwp_md.md_regs;
	oonstack = (lp->lwp_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0;

	/* Save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = lp->lwp_sigstk;
	sf.sf_uc.uc_mcontext.mc_onstack = oonstack;
	KKASSERT(__offsetof(struct trapframe, tf_rdi) == 0);
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(struct trapframe));

	/* Make the size of the saved context visible to userland */
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext);

	/* Allocate and validate space for the signal handler context. */
	if ((lp->lwp_flags & LWP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (char *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size -
			      sizeof(struct sigframe));
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	} else {
		/* We take the red zone into account */
		sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
	}

	/* Align to 16 bytes */
	sfp = (struct sigframe *)((intptr_t)sp & ~0xFUL);

	/* Translate the signal if appropriate */
	if (p->p_sysent->sv_sigtbl) {
		if (sig <= p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
	}

	/*
	 * Build the argument list for the signal handler.
	 *
	 * Arguments are in registers (%rdi, %rsi, %rdx, %rcx)
	 */
	regs->tf_rdi = sig;				/* argument 1 */
	regs->tf_rdx = (register_t)&sfp->sf_uc;		/* argument 3 */

	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 *
		 *	action(signo, siginfo, ucontext)
		 */
		regs->tf_rsi = (register_t)&sfp->sf_si;	/* argument 2 */
		regs->tf_rcx = (register_t)regs->tf_err; /* argument 4 */
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_addr;
	} else {
		/*
		 * Old FreeBSD-style arguments.
		 *
		 *	handler(signo, code, [uc], addr)
		 */
		regs->tf_rsi = (register_t)code;	/* argument 2 */
		regs->tf_rcx = (register_t)regs->tf_addr; /* argument 4 */
		sf.sf_ahu.sf_handler = catcher;
	}

#if 0
	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 =
			&lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}
#endif

	/*
	 * Save the FPU state and reinit the FP unit
	 */
	npxpush(&sf.sf_uc.uc_mcontext);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		sigexit(lp, SIGILL);
	}

	regs->tf_rsp = (register_t)sfp;
	regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);

	/*
	 * The x86_64 ABI specifies that the direction flag must be cleared
	 * on function entry.
	 */
	regs->tf_rflags &= ~(PSL_T | PSL_D);

	/*
	 * 64 bit mode has a code and stack selector but no data or extra
	 * selector.  %fs and %gs are not stored in-context.
	 */
	regs->tf_cs = _ucodesel;
	regs->tf_ss = _udatasel;
}
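
/*
 * Illustrative example (not part of the original file): a userland handler
 * installed with SA_SIGINFO receives exactly the three arguments that
 * sendsig() places in %rdi, %rsi and %rdx above.  A minimal sketch:
 *
 *	#include <signal.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *
 *	static void
 *	handler(int signo, siginfo_t *si, void *ucp)
 *	{
 *		// signo/si/ucp arrive in %rdi/%rsi/%rdx per sendsig()
 *		printf("sig %d code %d addr %p\n",
 *		    signo, si->si_code, si->si_addr);
 *	}
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	sigaction(SIGBUS, &sa, NULL);
 */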

/*
 * Sanitize the trapframe for a virtual kernel passing control to a custom
 * VM context.  Remove any items that would otherwise create a privilege
 * issue.
 *
 * XXX at the moment we allow userland to set the resume flag.  Is this a
 * bad idea?
 */
int
cpu_sanitize_frame(struct trapframe *frame)
{
	frame->tf_cs = _ucodesel;
	frame->tf_ss = _udatasel;
	/* XXX VM (8086) mode not supported? */
	frame->tf_rflags &= (PSL_RF | PSL_USERCHANGE | PSL_VM_UNSUPP);
	frame->tf_rflags |= PSL_RESERVED_DEFAULT | PSL_I;

	return (0);
}

/*
 * Sanitize the tls so that loading the descriptor does not blow up
 * on us.  For x86_64 we don't have to do anything.
 */
int
cpu_sanitize_tls(struct savetls *tls)
{
	return (0);
}

/*
 * sigreturn(ucontext_t *sigcntxp)
 *
 * System call to clean up state after a signal has been taken.  Reset
 * the signal mask and stack state from the context left by sendsig
 * (above).  Return to the previous pc and psl as specified by that
 * context.  Check carefully to make sure that the user has not modified
 * the state to gain improper privileges.
 */
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)

int
sys_sigreturn(struct sigreturn_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct trapframe *regs;
	ucontext_t uc;
	ucontext_t *ucp;
	register_t rflags;
	int cs;
	int error;

	/*
	 * We have to copy the information into kernel space so userland
	 * can't modify it while we are sniffing it.
	 */
	regs = lp->lwp_md.md_regs;
	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error)
		return (error);
	ucp = &uc;
	rflags = ucp->uc_mcontext.mc_rflags;

	/* VM (8086) mode not supported */
	rflags &= ~PSL_VM_UNSUPP;

#if 0
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (lp->lwp_thread->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* go back to user mode if both flags are set */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(lp->lwp_proc, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_gs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = tf->tf_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
#if 0
		tf->tf_fs = _udatasel;
		tf->tf_gs = _udatasel;
#endif
	} else
#endif
	{
		/*
		 * Don't allow users to change privileged or reserved flags.
		 *
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
			kprintf("sigreturn: rflags = 0x%lx\n", (long)rflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			kprintf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(lp, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}
		bcopy(&ucp->uc_mcontext.mc_rdi, regs,
		      sizeof(struct trapframe));
	}

	/*
	 * Restore the FPU state from the frame
	 */
	npxpop(&ucp->uc_mcontext);

	if (ucp->uc_mcontext.mc_onstack & 1)
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	else
		lp->lwp_sigstk.ss_flags &= ~SS_ONSTACK;

	lp->lwp_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(lp->lwp_sigmask);
	return (EJUSTRETURN);
}
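
/*
 * Illustrative note (not from the original source): EFL_SECURE() above
 * accepts a new rflags value only when every bit outside PSL_USERCHANGE
 * matches the current value.  For example, flipping the carry flag
 * (PSL_C, which is in PSL_USERCHANGE) XORs to a bit that is masked off
 * and passes, while attempting to raise IOPL (PSL_IOPL, not in
 * PSL_USERCHANGE) leaves a nonzero residue and is rejected:
 *
 *	EFL_SECURE(oef | PSL_C,    oef)  ->  (PSL_C & ~PSL_USERCHANGE) == 0
 *	EFL_SECURE(oef | PSL_IOPL, oef)  ->  nonzero residue, rejected
 */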

/*
 * cpu_idle() represents the idle LWKT.  You cannot return from this function
 * (unless you want to blow things up!).  Instead we look for runnable threads
 * and loop or halt as appropriate.  Giant is not held on entry to the thread.
 *
 * The main loop is entered with a critical section held, so we must release
 * the critical section before doing anything else.  lwkt_switch() will
 * check for pending interrupts due to entering and exiting its own
 * critical section.
 *
 * Note on cpu_idle_hlt: On an SMP system we rely on a scheduler IPI
 * to wake a HLTed cpu up.
 */
static int	cpu_idle_hlt = 1;
static int	cpu_idle_hltcnt;
static int	cpu_idle_spincnt;

SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
	&cpu_idle_hlt, 0, "Idle loop HLT enable");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hltcnt, CTLFLAG_RW,
	&cpu_idle_hltcnt, 0, "Idle loop entry halts");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_spincnt, CTLFLAG_RW,
	&cpu_idle_spincnt, 0, "Idle loop entry spins");

void
cpu_idle(void)
{
	struct thread *td = curthread;
	struct mdglobaldata *gd = mdcpu;
	int reqflags;

	crit_exit();
	KKASSERT(td->td_critcount == 0);
	cpu_enable_intr();

	for (;;) {
		/*
		 * See if there are any LWKTs ready to go.
		 */
		lwkt_switch();

		/*
		 * The idle loop halts only if no threads are schedulable
		 * and no signals have occurred.
		 */
		if (cpu_idle_hlt &&
		    (td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
			splz();
			if ((td->td_gd->gd_reqflags &
			     RQF_IDLECHECK_WK_MASK) == 0) {
#ifdef DEBUGIDLE
				struct timeval tv1, tv2;
				gettimeofday(&tv1, NULL);
#endif
				reqflags = gd->mi.gd_reqflags &
					   ~RQF_IDLECHECK_WK_MASK;
				KKASSERT(gd->mi.gd_processing_ipiq == 0);
				umtx_sleep(&gd->mi.gd_reqflags, reqflags,
					   1000000);
#ifdef DEBUGIDLE
				gettimeofday(&tv2, NULL);
				if (tv2.tv_usec - tv1.tv_usec +
				    (tv2.tv_sec - tv1.tv_sec) * 1000000
				    > 500000) {
					kprintf("cpu %d idlelock %08x %08x\n",
						gd->mi.gd_cpuid,
						gd->mi.gd_reqflags,
						gd->gd_fpending);
				}
#endif
			}
			++cpu_idle_hltcnt;
		} else {
			splz();
			__asm __volatile("pause");
			++cpu_idle_spincnt;
		}
	}
}
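
/*
 * Illustrative sketch (not part of the original file): the vkernel idle
 * halt above relies on the umtx_sleep()/umtx_wakeup() pairing.  The
 * sleeper only blocks while *ptr still equals the value it sampled, so a
 * waker that changes the word before calling umtx_wakeup() cannot be
 * missed.  NEW_WORK and nothing_to_do() below are hypothetical names:
 *
 *	// sleeper (e.g. the idle loop)
 *	int v = flags;				// sample the word
 *	if (nothing_to_do(v))
 *		umtx_sleep(&flags, v, timeout_us); // blocks iff flags == v
 *
 *	// waker (e.g. the scheduler IPI path)
 *	atomic_set_int(&flags, NEW_WORK);	// publish work first
 *	umtx_wakeup(&flags, 1);			// then wake one sleeper
 */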

/*
 * Called by the spinlock code with or without a critical section held
 * when a spinlock is found to be seriously contested.
 *
 * We need to enter a critical section to prevent signals from recursing
 * into pthreads.
 */
void
cpu_spinlock_contested(void)
{
	cpu_pause();
}

/*
 * Clear registers on exec
 */
void
exec_setregs(u_long entry, u_long stack, u_long ps_strings)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct pcb *pcb = td->td_pcb;
	struct trapframe *regs = lp->lwp_md.md_regs;

	/* was i386_user_cleanup() in NetBSD */
	user_ldt_free(pcb);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_rip = entry;
	regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;	/* align the stack */
	regs->tf_rdi = stack;				/* argv */
	regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_cs = _ucodesel;
	regs->tf_rbx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;	/* JG set bit 10? */
		if (pcb == td->td_pcb) {
			/*
			 * Clear the debug registers on the running CPU,
			 * otherwise they will end up affecting the next
			 * process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * NOTE: do not set CR0_TS here.  npxinit() must do it after clearing
	 *	 gd_npxthread.  Otherwise a preemptive interrupt thread
	 *	 may panic in npxdna().
	 */
	crit_enter();
#if 0
	load_cr0(rcr0() | CR0_MP);
#endif

	/*
	 * NOTE: The MSR values must be correct so we can return to
	 *	 userland.  gd_user_fs/gs must be correct so the switch
	 *	 code knows what the current MSR values are.
	 */
	pcb->pcb_fsbase = 0;	/* Values loaded from PCB on switch */
	pcb->pcb_gsbase = 0;

	/* Initialize the npx (if any) for the current process. */
	npxinit();
	crit_exit();

	/*
	 * NOTE: The linux emulator needs edx to be 0x0 on entry, which is
	 *	 handled in execve simply by setting the 64 bit syscall
	 *	 return value to 0.
	 */
}
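
/*
 * Illustrative note (not from the original source): the expression
 * ((stack - 8) & ~0xFul) + 8 in exec_setregs() yields an entry %rsp that
 * is 8 (mod 16), matching the x86_64 calling convention in which %rsp is
 * 16-byte aligned before a call instruction pushes the 8-byte return
 * address.  Worked examples:
 *
 *	stack = 0x...e9c8  ->  ((0x...e9c0) & ~0xf) + 8 = 0x...e9c8
 *	stack = 0x...e9cc  ->  ((0x...e9c4) & ~0xf) + 8 = 0x...e9c8
 */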

void
cpu_setregs(void)
{
#if 0
	unsigned int cr0;

	cr0 = rcr0();
	cr0 |= CR0_NE;			/* Done by npxinit() */
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
	cr0 |= CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
#endif
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

extern u_long bootdev;		/* not a cdev_t - encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
	CTLFLAG_RD, &bootdev, 0, "Boot device (not in cdev_t format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

extern struct user *proc0paddr;

#if 0
extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(dblfault),
	IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
#endif

int
ptrace_set_pc(struct lwp *lp, unsigned long addr)
{
	lp->lwp_md.md_regs->tf_rip = addr;
	return (0);
}

int
ptrace_single_step(struct lwp *lp)
{
	lp->lwp_md.md_regs->tf_rflags |= PSL_T;
	return (0);
}

int
fill_regs(struct lwp *lp, struct reg *regs)
{
	struct trapframe *tp;

	if ((tp = lp->lwp_md.md_regs) == NULL)
		return (EINVAL);
	bcopy(&tp->tf_rdi, &regs->r_rdi, sizeof(*regs));
	return (0);
}

int
set_regs(struct lwp *lp, struct reg *regs)
{
	struct trapframe *tp;

	tp = lp->lwp_md.md_regs;
	if (!EFL_SECURE(regs->r_rflags, tp->tf_rflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	bcopy(&regs->r_rdi, &tp->tf_rdi, sizeof(*regs));
	return (0);
}
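
/*
 * Illustrative example (not part of the original file): fill_regs() and
 * set_regs() above back the PT_GETREGS/PT_SETREGS ptrace(2) requests.
 * A debugger might read and rewrite a stopped child's registers roughly
 * like this (new_pc is a hypothetical value):
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <machine/reg.h>
 *
 *	struct reg r;
 *	ptrace(PT_GETREGS, pid, (caddr_t)&r, 0);
 *	r.r_rip = new_pc;		// redirect the child
 *	ptrace(PT_SETREGS, pid, (caddr_t)&r, 0);
 */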

static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
}

static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
}

int
fill_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
	if (lp->lwp_thread == NULL || lp->lwp_thread->td_pcb == NULL)
		return (EINVAL);
	if (cpu_fxsr) {
		fill_fpregs_xmm(&lp->lwp_thread->td_pcb->pcb_save.sv_xmm,
				(struct save87 *)fpregs);
		return (0);
	}
	bcopy(&lp->lwp_thread->td_pcb->pcb_save.sv_87, fpregs,
	      sizeof(*fpregs));
	return (0);
}

int
set_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
			       &lp->lwp_thread->td_pcb->pcb_save.sv_xmm);
		return (0);
	}
	bcopy(fpregs, &lp->lwp_thread->td_pcb->pcb_save.sv_87,
	      sizeof(*fpregs));
	return (0);
}

int
fill_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
	return (ENOSYS);
}

int
set_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
	return (ENOSYS);
}

#if 0
/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all GE and LE bits in the dr7 register are zero,
		 * thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * At least one of the breakpoints was hit; check to see
	 * which ones and whether any of them are user space addresses.
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAX_USER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}
#endif

void
identcpu(void)
{
	int regs[4];

	do_cpuid(1, regs);
	cpu_feature = regs[3];
}

#ifndef DDB
void
Debugger(const char *msg)
{
	kprintf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */
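
/*
 * Illustrative note (not from the original source): identcpu() above
 * stashes the CPUID leaf-1 %edx feature bits in cpu_feature, so other
 * code can test hardware capabilities against the CPUID_* masks from
 * <machine/specialreg.h>, e.g.:
 *
 *	if (cpu_feature & CPUID_SSE2)
 *		... take an SSE2-dependent path ...
 */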