/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1982, 1987, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
 */

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_msgbuf.h"
#include "opt_swap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/usched.h>
#include <sys/reg.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <machine/cpu.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/globaldata.h>		/* CPU_prvspace */
#include <machine/smp.h>
#include <machine/cputypes.h>

#include <bus/isa/rtc.h>
#include <sys/random.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>
#include <unistd.h>			/* umtx_* functions */
#include <pthread.h>			/* pthread_yield() */

extern void dblfault_handler (void);

static void set_fpregs_xmm (struct save87 *, struct savexmm *);
static void fill_fpregs_xmm (struct savexmm *, struct save87 *);

int64_t tsc_offsets[MAXCPU];

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	u_long pmem = ctob(physmem);
	int error;

	error = sysctl_handle_long(oidp, &pmem, 0, req);

	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_ULONG|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "LU",
	"Total system memory in bytes (number of pages * page size)");

static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	u_long usermem = ctob(Maxmem - vmstats.v_wire_count);
	int error;

	error = sysctl_handle_long(oidp, &usermem, 0, req);

	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_ULONG|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "LU", "");

SYSCTL_ULONG(_hw, OID_AUTO, availpages, CTLFLAG_RD, &Maxmem, 0, "");
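
/*
 * Illustrative only: one way userland might read the hw.physmem and
 * hw.usermem nodes exported above.  This is a hedged sketch (not part of
 * this file's build) assuming the standard sysctlbyname(3) interface.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	u_long physmem, usermem;
	size_t len;

	len = sizeof(physmem);
	if (sysctlbyname("hw.physmem", &physmem, &len, NULL, 0) == 0)
		printf("hw.physmem: %lu bytes\n", physmem);
	len = sizeof(usermem);
	if (sysctlbyname("hw.usermem", &usermem, &len, NULL, 0) == 0)
		printf("hw.usermem: %lu bytes\n", usermem);
	return (0);
}
#endif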

/*
 * Send an interrupt to a process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct trapframe *regs;
	struct sigacts *psp = p->p_sigacts;
	struct sigframe sf, *sfp;
	int oonstack;
	char *sp;

	regs = lp->lwp_md.md_regs;
	oonstack = (lp->lwp_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0;

	/* Save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = lp->lwp_sigstk;
	sf.sf_uc.uc_mcontext.mc_onstack = oonstack;
	KKASSERT(__offsetof(struct trapframe, tf_rdi) == 0);
	/* gcc8 craps out on -Warray-bounds w/ optimized bcopy */
	_bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(struct trapframe));

	/* Make the size of the saved context visible to userland */
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext);

	/* Allocate and validate space for the signal handler context. */
	if ((lp->lwp_flags & LWP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (char *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size -
			      sizeof(struct sigframe));
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	} else {
		/* We take the red zone into account */
		sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
	}

	/* Align to 16 bytes */
	sfp = (struct sigframe *)((intptr_t)sp & ~0xFUL);

	/* Translate the signal if appropriate */
	if (p->p_sysent->sv_sigtbl) {
		if (sig <= p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
	}

	/*
	 * Build the argument list for the signal handler.
	 *
	 * Arguments are in registers (%rdi, %rsi, %rdx, %rcx)
	 */
	regs->tf_rdi = sig;				/* argument 1 */
	regs->tf_rdx = (register_t)&sfp->sf_uc;		/* argument 3 */

	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 *
		 * action(signo, siginfo, ucontext)
		 */
		regs->tf_rsi = (register_t)&sfp->sf_si;	/* argument 2 */
		regs->tf_rcx = (register_t)regs->tf_err; /* argument 4 */
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_pid = psp->ps_frominfo[sig].pid;
		sf.sf_si.si_uid = psp->ps_frominfo[sig].uid;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_addr;
	} else {
		/*
		 * Old FreeBSD-style arguments.
		 *
		 * handler (signo, code, [uc], addr)
		 */
		regs->tf_rsi = (register_t)code;	/* argument 2 */
		regs->tf_rcx = (register_t)regs->tf_addr; /* argument 4 */
		sf.sf_ahu.sf_handler = catcher;
	}

#if 0
	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 =
			&lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}
#endif

	/*
	 * Save the FPU state and reinit the FP unit
	 */
	npxpush(&sf.sf_uc.uc_mcontext);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		sigexit(lp, SIGILL);
	}

	regs->tf_rsp = (register_t)sfp;
	regs->tf_rip = trunc_page64(PS_STRINGS - *(p->p_sysent->sv_szsigcode));
	regs->tf_rip -= SZSIGCODE_EXTRA_BYTES;

	/*
	 * The x86 ABI specifies that the direction flag must be cleared
	 * on function entry.
	 */
	regs->tf_rflags &= ~(PSL_T | PSL_D);

	/*
	 * 64 bit mode has a code and stack selector but
	 * no data or extra selector.  %fs and %gs are not
	 * stored in-context.
	 */
	regs->tf_cs = _ucodesel;
	regs->tf_ss = _udatasel;
}
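
/*
 * Illustrative only: a minimal userland SA_SIGINFO handler receiving the
 * three arguments sendsig() loads into %rdi, %rsi and %rdx above.  A
 * hedged sketch, not part of this file's build; it assumes the x86_64
 * mcontext field name mc_rip, and the example_* names are hypothetical.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

static void
example_action(int signo, siginfo_t *si, void *ucv)
{
	ucontext_t *uc = ucv;

	/* mc_rip was copied from the interrupted trapframe by sendsig() */
	printf("signal %d, fault addr %p, rip %#lx\n",
	       signo, si->si_addr, (unsigned long)uc->uc_mcontext.mc_rip);
}

static void
example_install(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = example_action;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGBUS, &sa, NULL);
}
#endif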

/*
 * Sanitize the trapframe for a virtual kernel passing control to a custom
 * VM context.  Remove any items that would otherwise create a privilege
 * issue.
 *
 * XXX at the moment we allow userland to set the resume flag.  Is this a
 * bad idea?
 */
int
cpu_sanitize_frame(struct trapframe *frame)
{
	frame->tf_cs = _ucodesel;
	frame->tf_ss = _udatasel;
	/* XXX VM (8086) mode not supported? */
	frame->tf_rflags &= (PSL_RF | PSL_USERCHANGE | PSL_VM_UNSUPP);
	frame->tf_rflags |= PSL_RESERVED_DEFAULT | PSL_I;

	return(0);
}

/*
 * Sanitize the tls so loading the descriptor does not blow up
 * on us.  For x86_64 we don't have to do anything.
 */
int
cpu_sanitize_tls(struct savetls *tls)
{
	return(0);
}

/*
 * sigreturn(ucontext_t *sigcntxp)
 *
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)

int
sys_sigreturn(struct sigreturn_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct trapframe *regs;
	ucontext_t uc;
	ucontext_t *ucp;
	register_t rflags;
	int cs;
	int error;

	/*
	 * We have to copy the information into kernel space so userland
	 * can't modify it while we are sniffing it.
	 */
	regs = lp->lwp_md.md_regs;
	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error)
		return (error);
	ucp = &uc;
	rflags = ucp->uc_mcontext.mc_rflags;

	/* VM (8086) mode not supported */
	rflags &= ~PSL_VM_UNSUPP;

#if 0
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (lp->lwp_thread->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* go back to user mode if both flags are set */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(lp->lwp_proc, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_gs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = tf->tf_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
#if 0
		tf->tf_fs = _udatasel;
		tf->tf_gs = _udatasel;
#endif
	} else
#endif
	{
		/*
		 * Don't allow users to change privileged or reserved flags.
		 *
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
			kprintf("sigreturn: rflags = 0x%lx\n", (long)rflags);
			return(EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			kprintf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(lp, SIGBUS, T_PROTFLT);
			return(EINVAL);
		}
		bcopy(&ucp->uc_mcontext.mc_rdi, regs,
		      sizeof(struct trapframe));
	}

	/*
	 * Restore the FPU state from the frame
	 */
	npxpop(&ucp->uc_mcontext);

	if (ucp->uc_mcontext.mc_onstack & 1)
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	else
		lp->lwp_sigstk.ss_flags &= ~SS_ONSTACK;

	lp->lwp_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(lp->lwp_sigmask);
	return(EJUSTRETURN);
}
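
/*
 * Illustrative only, restating the checks above: EFL_SECURE() accepts a
 * user-supplied rflags only if every bit outside PSL_USERCHANGE matches
 * the current value, and CS_SECURE() insists the requested %cs selector
 * runs at user privilege (ring 3).  A hedged standalone sketch; the
 * example_* name is hypothetical.
 */
#if 0
static int
example_frame_ok(register_t new_rflags, register_t old_rflags, int new_cs)
{
	/* reject any change to non-user-modifiable flag bits */
	if (((new_rflags ^ old_rflags) & ~PSL_USERCHANGE) != 0)
		return (0);
	/* reject any code selector whose privilege level is not ring 3 */
	if (ISPL(new_cs) != SEL_UPL)
		return (0);
	return (1);
}
#endif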

/*
 * cpu_idle() represents the idle LWKT.  You cannot return from this function
 * (unless you want to blow things up!).  Instead we look for runnable threads
 * and loop or halt as appropriate.  Giant is not held on entry to the thread.
 *
 * The main loop is entered with a critical section held, we must release
 * the critical section before doing anything else.  lwkt_switch() will
 * check for pending interrupts due to entering and exiting its own
 * critical section.
 *
 * Note on cpu_idle_hlt: On an SMP system we rely on a scheduler IPI
 * to wake a HLTed cpu up.
 */
static int	cpu_idle_hlt = 1;
static int	cpu_idle_hltcnt;
static int	cpu_idle_spincnt;

SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
	&cpu_idle_hlt, 0, "Idle loop HLT enable");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hltcnt, CTLFLAG_RW,
	&cpu_idle_hltcnt, 0, "Idle loop entry halts");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_spincnt, CTLFLAG_RW,
	&cpu_idle_spincnt, 0, "Idle loop entry spins");

void
cpu_idle(void)
{
	struct thread *td = curthread;
	struct mdglobaldata *gd = mdcpu;
	int reqflags;

	crit_exit();
	KKASSERT(td->td_critcount == 0);
	cpu_enable_intr();

	for (;;) {
		/*
		 * See if there are any LWKTs ready to go.
		 */
		lwkt_switch();

		/*
		 * The idle loop halts only if no threads are schedulable
		 * and no signals have occurred.
		 */
		if (cpu_idle_hlt &&
		    (td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
			splz();
			if ((td->td_gd->gd_reqflags &
			     RQF_IDLECHECK_WK_MASK) == 0) {
#ifdef DEBUGIDLE
				struct timeval tv1, tv2;
				gettimeofday(&tv1, NULL);
#endif
				reqflags = gd->mi.gd_reqflags &
					   ~RQF_IDLECHECK_WK_MASK;
				KKASSERT(gd->mi.gd_processing_ipiq == 0);
				umtx_sleep(&gd->mi.gd_reqflags, reqflags,
					   1000000);
#ifdef DEBUGIDLE
				gettimeofday(&tv2, NULL);
				if (tv2.tv_usec - tv1.tv_usec +
				    (tv2.tv_sec - tv1.tv_sec) * 1000000
				    > 500000) {
					kprintf("cpu %d idlelock %08x %08x\n",
						gd->mi.gd_cpuid,
						gd->mi.gd_reqflags,
						gd->gd_fpending);
				}
#endif
			}
			++cpu_idle_hltcnt;
		} else {
			splz();
			__asm __volatile("pause");
			++cpu_idle_spincnt;
		}
	}
}
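
/*
 * Illustrative only: the umtx_sleep() pattern the idle loop above relies
 * on.  A hedged sketch assuming DragonFly's umtx_sleep(ptr, matchvalue,
 * timeout-in-microseconds) interface declared in <unistd.h>; the
 * example_* name is hypothetical.
 */
#if 0
#include <unistd.h>

static void
example_wait_for_change(volatile const int *ptr, int value)
{
	/*
	 * Sleep only while *ptr still equals value; a racing
	 * umtx_wakeup() on ptr, or the 1-second timeout, wakes us.
	 */
	while (*ptr == value)
		umtx_sleep(ptr, value, 1000000);
}
#endif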

/*
 * Called by the spinlock code with or without a critical section held
 * when a spinlock is found to be seriously contested.
 *
 * We need to enter a critical section to prevent signals from recursing
 * into pthreads.
 */
void
cpu_spinlock_contested(void)
{
	cpu_pause();
}

/*
 * Clear registers on exec
 */
void
exec_setregs(u_long entry, u_long stack, u_long ps_strings)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct pcb *pcb = td->td_pcb;
	struct trapframe *regs = lp->lwp_md.md_regs;

	user_ldt_free(pcb);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_rip = entry;
	regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;	/* align the stack */
	regs->tf_rdi = stack;				/* argv */
	regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_cs = _ucodesel;
	regs->tf_rbx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;	/* JG set bit 10? */
		if (pcb == td->td_pcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * NOTE: do not set CR0_TS here.  npxinit() must do it after clearing
	 *	 gd_npxthread.  Otherwise a preemptive interrupt thread
	 *	 may panic in npxdna().
	 */
	crit_enter();
#if 0
	load_cr0(rcr0() | CR0_MP);
#endif

	/*
	 * NOTE: The MSR values must be correct so we can return to
	 *	 userland.  gd_user_fs/gs must be correct so the switch
	 *	 code knows what the current MSR values are.
	 */
	pcb->pcb_fsbase = 0;	/* Values loaded from PCB on switch */
	pcb->pcb_gsbase = 0;
	/* Initialize the npx (if any) for the current process. */
	npxinit();
	crit_exit();

	/*
	 * note: linux emulator needs edx to be 0x0 on entry, which is
	 * handled in execve simply by setting the 64 bit syscall
	 * return value to 0.
	 */
}
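
/*
 * Illustrative only: the rsp computation in exec_setregs() above leaves
 * the new stack pointer congruent to 8 mod 16, the state the x86_64 ABI
 * prescribes at function entry (as if a return address had just been
 * pushed onto a 16-byte aligned stack).  A hedged sketch; the example_*
 * name is hypothetical.
 */
#if 0
#include <assert.h>

static unsigned long
example_entry_rsp(unsigned long stack)
{
	unsigned long rsp = ((stack - 8) & ~0xFul) + 8;

	assert((rsp & 0xF) == 8);	/* rsp % 16 == 8 at entry */
	assert(rsp <= stack);		/* never above the original top */
	return (rsp);
}
#endif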

void
cpu_setregs(void)
{
#if 0
	unsigned int cr0;

	cr0 = rcr0();
	cr0 |= CR0_NE;			/* Done by npxinit() */
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
	cr0 |= CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
#endif
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
				  req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

/*
 * Initialize x86 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

extern struct user *proc0paddr;

#if 0
extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(dblfault),
	IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
#endif

int
ptrace_set_pc(struct lwp *lp, unsigned long addr)
{
	lp->lwp_md.md_regs->tf_rip = addr;
	return (0);
}

int
ptrace_single_step(struct lwp *lp)
{
	lp->lwp_md.md_regs->tf_rflags |= PSL_T;
	return (0);
}

int
fill_regs(struct lwp *lp, struct reg *regs)
{
	struct trapframe *tp;

	if ((tp = lp->lwp_md.md_regs) == NULL)
		return EINVAL;
	bcopy(&tp->tf_rdi, &regs->r_rdi, sizeof(*regs));
	return (0);
}

int
set_regs(struct lwp *lp, struct reg *regs)
{
	struct trapframe *tp;

	tp = lp->lwp_md.md_regs;
	if (!EFL_SECURE(regs->r_rflags, tp->tf_rflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	bcopy(&regs->r_rdi, &tp->tf_rdi, sizeof(*regs));
	return (0);
}
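
/*
 * Illustrative only: how a userland debugger might exercise the
 * fill_regs()/set_regs()/ptrace_single_step() backends above via the
 * standard BSD ptrace(2) requests.  A hedged sketch, not part of this
 * file's build; the example_* name is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <machine/reg.h>

static void
example_step_once(pid_t pid)
{
	struct reg r;
	int status;

	/* PT_GETREGS lands in fill_regs(), PT_SETREGS in set_regs() */
	ptrace(PT_GETREGS, pid, (caddr_t)&r, 0);
	ptrace(PT_SETREGS, pid, (caddr_t)&r, 0);

	/* PT_STEP sets PSL_T via ptrace_single_step() */
	ptrace(PT_STEP, pid, (caddr_t)1, 0);
	waitpid(pid, &status, 0);
}
#endif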

static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
}

static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
}

int
fill_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
	if (lp->lwp_thread == NULL || lp->lwp_thread->td_pcb == NULL)
		return EINVAL;
	if (cpu_fxsr) {
		fill_fpregs_xmm(&lp->lwp_thread->td_pcb->pcb_save.sv_xmm,
				(struct save87 *)fpregs);
		return (0);
	}
	bcopy(&lp->lwp_thread->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
			       &lp->lwp_thread->td_pcb->pcb_save.sv_xmm);
		return (0);
	}
	bcopy(fpregs, &lp->lwp_thread->td_pcb->pcb_save.sv_87, sizeof *fpregs);
	return (0);
}

int
fill_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
	return (ENOSYS);
}

int
set_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
	return (ENOSYS);
}

#if 0
/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all GE and LE bits in the dr7 register are zero,
		 * thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * at least one of the breakpoints was hit, check to see
	 * which ones and if any of them are user space addresses
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAX_USER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}

#endif

void
identcpu(void)
{
	int regs[4];

	do_cpuid(1, regs);
	cpu_feature = regs[3];
}

#ifndef DDB
void
Debugger(const char *msg)
{
	kprintf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */
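
/*
 * Illustrative only: identcpu() above caches %edx of CPUID leaf 1 in
 * cpu_feature.  A hedged sketch of reading the same leaf with inline
 * assembly; the example_* name is hypothetical.
 */
#if 0
static unsigned int
example_cpuid_features(void)
{
	unsigned int eax, ebx, ecx, edx;

	__asm __volatile("cpuid"
			 : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
			 : "0" (1));
	return (edx);	/* same bits identcpu() stores in cpu_feature */
}
#endif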