/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1982, 1987, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
 */

#include "use_ether.h"
#include "use_isa.h"
#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_directio.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_msgbuf.h"
#include "opt_swap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/upcall.h>
#include <sys/usched.h>
#include <sys/reg.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <machine/cpu.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/globaldata.h>		/* CPU_prvspace */
#include <machine/smp.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <machine/cputypes.h>

#include <bus/isa/rtc.h>
#include <sys/random.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>
#include <unistd.h>		/* umtx_* functions */

extern void dblfault_handler (void);

#ifndef CPU_DISABLE_SSE
static void set_fpregs_xmm (struct save87 *, struct savexmm *);
static void fill_fpregs_xmm (struct savexmm *, struct save87 *);
#endif /* CPU_DISABLE_SSE */
#ifdef DIRECTIO
extern void ffs_rawread_setup(void);
#endif /* DIRECTIO */

#ifdef SMP
int64_t tsc_offsets[MAXCPU];
#else
int64_t tsc_offsets[1];
#endif

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	/* JG */
	int error = sysctl_handle_int(oidp, 0, ctob((int)Maxmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "IU", "");

static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	/* JG */
	int error = sysctl_handle_int(oidp, 0,
		ctob((int)Maxmem - vmstats.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "IU", "");

SYSCTL_ULONG(_hw, OID_AUTO, availpages, CTLFLAG_RD, &Maxmem, 0, "");
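
/*
 * Note on the "JG" markers above: Maxmem is a page count and ctob()
 * converts pages to bytes, so the int-sized arithmetic can overflow and
 * misreport these values on machines with 2GB or more of memory.
 */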

#if 0

static int
sysctl_machdep_msgbuf(SYSCTL_HANDLER_ARGS)
{
	int error;

	/*
	 * Unwind the buffer, so that it's linear (possibly starting with
	 * some initial nulls).
	 */
	error = sysctl_handle_opaque(oidp,
	    msgbufp->msg_ptr + msgbufp->msg_bufr,
	    msgbufp->msg_size - msgbufp->msg_bufr, req);
	if (error)
		return (error);
	if (msgbufp->msg_bufr > 0) {
		error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr,
		    msgbufp->msg_bufr, req);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD,
	0, 0, sysctl_machdep_msgbuf, "A", "Contents of kernel message buffer");

static int msgbuf_clear;

static int
sysctl_machdep_msgbuf_clear(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
	    req);
	if (!error && req->newptr) {
		/* Clear the buffer and reset write pointer */
		bzero(msgbufp->msg_ptr, msgbufp->msg_size);
		msgbufp->msg_bufr = msgbufp->msg_bufx = 0;
		msgbuf_clear = 0;
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW,
	&msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I",
	"Clear kernel message buffer");

#endif

/*
 * Send an interrupt to a process.
 *
 * The stack is set up so that the sigcode stored at its top calls the
 * handler, followed by a call to the sigreturn routine below.  After
 * sigreturn resets the signal mask, the stack, and the frame pointer,
 * it returns to the user-specified pc and psl.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct trapframe *regs;
	struct sigacts *psp = p->p_sigacts;
	struct sigframe sf, *sfp;
	int oonstack;
	char *sp;

	regs = lp->lwp_md.md_regs;
	oonstack = (lp->lwp_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0;

	/* Save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = lp->lwp_sigstk;
	sf.sf_uc.uc_mcontext.mc_onstack = oonstack;
	KKASSERT(__offsetof(struct trapframe, tf_rdi) == 0);
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(struct trapframe));

	/* Make the size of the saved context visible to userland */
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext);

	/* Save mailbox pending state for syscall interlock semantics */
	if (p->p_flag & P_MAILBOX)
		sf.sf_uc.uc_mcontext.mc_xflags |= PGEX_MAILBOX;

	/* Allocate and validate space for the signal handler context. */
	if ((lp->lwp_flag & LWP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (char *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size -
		    sizeof(struct sigframe));
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	} else {
		/* We take the red zone into account */
		sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
	}
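
	/*
	 * Note: the 128 bytes skipped above are the x86_64 ABI red zone,
	 * scratch space below %rsp that signal delivery must not clobber;
	 * the frame pointer is then aligned down to the 16-byte boundary
	 * the ABI requires.
	 */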

	/* Align to 16 bytes */
	sfp = (struct sigframe *)((intptr_t)sp & ~0xFUL);

	/* Translate the signal if appropriate */
	if (p->p_sysent->sv_sigtbl) {
		if (sig <= p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
	}

	/*
	 * Build the argument list for the signal handler.
	 *
	 * Arguments are in registers (%rdi, %rsi, %rdx, %rcx)
	 */
	regs->tf_rdi = sig;				/* argument 1 */
	regs->tf_rdx = (register_t)&sfp->sf_uc;		/* argument 3 */

	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 *
		 *	action(signo, siginfo, ucontext)
		 */
		regs->tf_rsi = (register_t)&sfp->sf_si;	/* argument 2 */
		regs->tf_rcx = (register_t)regs->tf_err; /* argument 4 */
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/*
		 * Old FreeBSD-style arguments.
		 *
		 *	handler(signo, code, [uc], addr)
		 */
		regs->tf_rsi = (register_t)code;	/* argument 2 */
		regs->tf_rcx = (register_t)regs->tf_err; /* argument 4 */
		sf.sf_ahu.sf_handler = catcher;
	}

#if 0
	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 =
		    &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}
#endif

	/*
	 * Save the FPU state and reinit the FP unit
	 */
	npxpush(&sf.sf_uc.uc_mcontext);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		sigexit(lp, SIGILL);
	}

	regs->tf_rsp = (register_t)sfp;
	regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);

	/*
	 * The ABI specifies that the direction flag must be cleared
	 * on function entry.
	 */
	regs->tf_rflags &= ~(PSL_T|PSL_D);

	/*
	 * 64 bit mode has a code and stack selector but
	 * no data or extra selector.  %fs and %gs are not
	 * stored in-context.
	 */
	regs->tf_cs = _ucodesel;
	regs->tf_ss = _udatasel;
}

/*
 * Sanitize the trapframe for a virtual kernel passing control to a custom
 * VM context.  Remove any items that would otherwise create a privilege
 * issue.
 *
 * XXX at the moment we allow userland to set the resume flag.  Is this a
 * bad idea?
 */
int
cpu_sanitize_frame(struct trapframe *frame)
{
	frame->tf_cs = _ucodesel;
	frame->tf_ss = _udatasel;
	/* XXX VM (8086) mode not supported? */
	frame->tf_rflags &= (PSL_RF | PSL_USERCHANGE | PSL_VM_UNSUPP);
	frame->tf_rflags |= PSL_RESERVED_DEFAULT | PSL_I;

	return (0);
}

/*
 * Sanitize the tls so loading the descriptor does not blow up
 * on us.  For x86_64 we don't have to do anything.
 */
int
cpu_sanitize_tls(struct savetls *tls)
{
	return (0);
}
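
/*
 * Note: sendsig() above points %rip at the signal trampoline (the
 * sigcode presumably copied out just below PS_STRINGS at exec time,
 * given the tf_rip computation); the trampoline is what ultimately
 * calls sys_sigreturn() below to unwind the frame.
 */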

/*
 * sigreturn(ucontext_t *sigcntxp)
 *
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
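
/*
 * EFL_SECURE() accepts a new rflags value only if it differs from the
 * old value in user-changeable bits (PSL_USERCHANGE).  CS_SECURE()
 * accepts a code selector only if its privilege level is user (SEL_UPL).
 */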

int
sys_sigreturn(struct sigreturn_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	struct trapframe *regs;
	ucontext_t uc;
	ucontext_t *ucp;
	register_t rflags;
	int cs;
	int error;

	/*
	 * We have to copy the information into kernel space so userland
	 * can't modify it while we are sniffing it.
	 */
	regs = lp->lwp_md.md_regs;
	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error)
		return (error);
	ucp = &uc;
	rflags = ucp->uc_mcontext.mc_rflags;

	/* VM (8086) mode not supported */
	rflags &= ~PSL_VM_UNSUPP;

#if 0
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (lp->lwp_thread->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* go back to user mode if both flags are set */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(lp->lwp_proc, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_gs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = tf->tf_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
#if 0
		tf->tf_fs = _udatasel;
		tf->tf_gs = _udatasel;
#endif
	} else
#endif
	{
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
			kprintf("sigreturn: rflags = 0x%lx\n", (long)rflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			kprintf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(lp, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}
		bcopy(&ucp->uc_mcontext.mc_rdi, regs,
		    sizeof(struct trapframe));
	}

	/*
	 * Restore the FPU state from the frame
	 */
	npxpop(&ucp->uc_mcontext);

	/*
	 * Merge saved signal mailbox pending flag to maintain interlock
	 * semantics against system calls.
	 */
	if (ucp->uc_mcontext.mc_xflags & PGEX_MAILBOX)
		p->p_flag |= P_MAILBOX;

	if (ucp->uc_mcontext.mc_onstack & 1)
		lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
	else
		lp->lwp_sigstk.ss_flags &= ~SS_ONSTACK;

	lp->lwp_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(lp->lwp_sigmask);
	return (EJUSTRETURN);
}
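
/*
 * Upcalls.  A rough sketch of the flow, assuming the usual DragonFly
 * upcall API: userland registers an upcall with the upcall syscalls,
 * sendupcall() below posts it (deferring delivery while the target
 * holds a critical section), and the userland dispatcher eventually
 * calls upc_dispatch(-1), as noted in sendupcall(), to run anything
 * that was deferred.
 */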

/*
 * Stack frame on entry to function.  %rax will contain the function vector,
 * %rcx will contain the function data.  flags, rcx, and rax will have
 * already been pushed on the stack.
 */
struct upc_frame {
	register_t	rax;
	register_t	rcx;
	register_t	rdx;
	register_t	flags;
	register_t	oldip;
};

void
sendupcall(struct vmupcall *vu, int morepending)
{
	struct lwp *lp = curthread->td_lwp;
	struct trapframe *regs;
	struct upcall upcall;
	struct upc_frame upc_frame;
	int crit_count = 0;

	/*
	 * If we are a virtual kernel running an emulated user process
	 * context, switch back to the virtual kernel context before
	 * trying to post the signal.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		lp->lwp_md.md_regs->tf_trapno = 0;
		vkernel_trap(lp, lp->lwp_md.md_regs);
	}

	/*
	 * Get the upcall data structure
	 */
	if (copyin(lp->lwp_upcall, &upcall, sizeof(upcall)) ||
	    copyin((char *)upcall.upc_uthread + upcall.upc_critoff,
		   &crit_count, sizeof(int))
	) {
		vu->vu_pending = 0;
		kprintf("bad upcall address\n");
		return;
	}

	/*
	 * If the data structure is already marked pending or has a critical
	 * section count, mark the data structure as pending and return
	 * without doing an upcall.  vu_pending is left set.
	 */
	if (upcall.upc_pending || crit_count >= vu->vu_pending) {
		if (upcall.upc_pending < vu->vu_pending) {
			upcall.upc_pending = vu->vu_pending;
			copyout(&upcall.upc_pending,
				&lp->lwp_upcall->upc_pending,
				sizeof(upcall.upc_pending));
		}
		return;
	}

	/*
	 * We can run this upcall now, clear vu_pending.
	 *
	 * Bump our critical section count and set or clear the
	 * user pending flag depending on whether more upcalls are
	 * pending.  The user will be responsible for calling
	 * upc_dispatch(-1) to process remaining upcalls.
	 */
	vu->vu_pending = 0;
	upcall.upc_pending = morepending;
	crit_count += TDPRI_CRIT;
	copyout(&upcall.upc_pending, &lp->lwp_upcall->upc_pending,
		sizeof(upcall.upc_pending));
	copyout(&crit_count, (char *)upcall.upc_uthread + upcall.upc_critoff,
		sizeof(int));

	/*
	 * Construct a stack frame and issue the upcall
	 */
	regs = lp->lwp_md.md_regs;
	upc_frame.rax = regs->tf_rax;
	upc_frame.rcx = regs->tf_rcx;
	upc_frame.rdx = regs->tf_rdx;
	upc_frame.flags = regs->tf_rflags;
	upc_frame.oldip = regs->tf_rip;
	if (copyout(&upc_frame, (void *)(regs->tf_rsp - sizeof(upc_frame)),
		    sizeof(upc_frame)) != 0) {
		kprintf("bad stack on upcall\n");
	} else {
		regs->tf_rax = (register_t)vu->vu_func;
		regs->tf_rcx = (register_t)vu->vu_data;
		regs->tf_rdx = (register_t)lp->lwp_upcall;
		regs->tf_rip = (register_t)vu->vu_ctx;
		regs->tf_rsp -= sizeof(upc_frame);
	}
}

/*
 * fetchupcall occurs in the context of a system call, which means that
 * we have to return EJUSTRETURN in order to prevent %rax and %rdx from
 * being overwritten by the syscall return value.
 *
 * if vu is not NULL we return the new context in %rdx, the new data in
 * %rcx, and the function pointer in %rax.
 */
int
fetchupcall(struct vmupcall *vu, int morepending, void *rsp)
{
	struct upc_frame upc_frame;
	struct lwp *lp = curthread->td_lwp;
	struct trapframe *regs;
	int error;
	struct upcall upcall;
	int crit_count;

	regs = lp->lwp_md.md_regs;

	error = copyout(&morepending, &lp->lwp_upcall->upc_pending,
			sizeof(int));
	if (error == 0) {
		if (vu) {
			/*
			 * This jumps us to the next ready context.
			 */
			vu->vu_pending = 0;
			error = copyin(lp->lwp_upcall, &upcall,
				       sizeof(upcall));
			crit_count = 0;
			if (error == 0)
				error = copyin((char *)upcall.upc_uthread +
					       upcall.upc_critoff,
					       &crit_count, sizeof(int));
			crit_count += TDPRI_CRIT;
			if (error == 0)
				error = copyout(&crit_count,
						(char *)upcall.upc_uthread +
						upcall.upc_critoff,
						sizeof(int));
			regs->tf_rax = (register_t)vu->vu_func;
			regs->tf_rcx = (register_t)vu->vu_data;
			regs->tf_rdx = (register_t)lp->lwp_upcall;
			regs->tf_rip = (register_t)vu->vu_ctx;
			regs->tf_rsp = (register_t)rsp;
		} else {
			/*
			 * This returns us to the originally interrupted code.
			 */
			error = copyin(rsp, &upc_frame, sizeof(upc_frame));
			regs->tf_rax = upc_frame.rax;
			regs->tf_rcx = upc_frame.rcx;
			regs->tf_rdx = upc_frame.rdx;
			regs->tf_rflags = (regs->tf_rflags & ~PSL_USERCHANGE) |
					  (upc_frame.flags & PSL_USERCHANGE);
			regs->tf_rip = upc_frame.oldip;
			regs->tf_rsp = (register_t)((char *)rsp +
						    sizeof(upc_frame));
		}
	}
	if (error == 0)
		error = EJUSTRETURN;
	return (error);
}

/*
 * cpu_idle() represents the idle LWKT.  You cannot return from this function
 * (unless you want to blow things up!).  Instead we look for runnable threads
 * and loop or halt as appropriate.  Giant is not held on entry to the thread.
 *
 * The main loop is entered with a critical section held, we must release
 * the critical section before doing anything else.  lwkt_switch() will
 * check for pending interrupts due to entering and exiting its own
 * critical section.
 *
 * Note on cpu_idle_hlt: On an SMP system we rely on a scheduler IPI
 * to wake a HLTed cpu up.  However, there are cases where the idlethread
 * will be entered with the possibility that no IPI will occur and in such
 * cases lwkt_switch() sets TDF_IDLE_NOHLT.
 */
static int	cpu_idle_hlt = 1;
static int	cpu_idle_hltcnt;
static int	cpu_idle_spincnt;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
	&cpu_idle_hlt, 0, "Idle loop HLT enable");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hltcnt, CTLFLAG_RW,
	&cpu_idle_hltcnt, 0, "Idle loop entry halts");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_spincnt, CTLFLAG_RW,
	&cpu_idle_spincnt, 0, "Idle loop entry spins");
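
/*
 * Usage note: these knobs are writable at runtime; e.g.
 * "sysctl machdep.cpu_idle_hlt=0" forces the idle loop to spin instead
 * of sleeping.
 */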

void
cpu_idle(void)
{
	struct thread *td = curthread;
	struct mdglobaldata *gd = mdcpu;

	crit_exit();
	KKASSERT(td->td_pri < TDPRI_CRIT);
	cpu_enable_intr();
	for (;;) {
		/*
		 * See if there are any LWKTs ready to go.
		 */
		lwkt_switch();

		/*
		 * The idle loop halts only if no threads are schedulable
		 * and no signals have occurred.
		 */
		if (cpu_idle_hlt && !lwkt_runnable() &&
		    (td->td_flags & TDF_IDLE_NOHLT) == 0) {
			splz();
			if (!lwkt_runnable()) {
#ifdef DEBUGIDLE
				struct timeval tv1, tv2;
				gettimeofday(&tv1, NULL);
#endif
				umtx_sleep(&gd->mi.gd_runqmask, 0, 1000000);
#ifdef DEBUGIDLE
				gettimeofday(&tv2, NULL);
				if (tv2.tv_usec - tv1.tv_usec +
				    (tv2.tv_sec - tv1.tv_sec) * 1000000
				    > 500000) {
					kprintf("cpu %d idlelock %08x %08x\n",
						gd->mi.gd_cpuid,
						gd->mi.gd_runqmask,
						gd->gd_fpending);
				}
#endif
			}
#ifdef SMP
			else {
				__asm __volatile("pause");
			}
#endif
			++cpu_idle_hltcnt;
		} else {
			td->td_flags &= ~TDF_IDLE_NOHLT;
			splz();
#ifdef SMP
			/*__asm __volatile("sti; pause");*/
			__asm __volatile("pause");
#else
			/*__asm __volatile("sti");*/
#endif
			++cpu_idle_spincnt;
		}
	}
}
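
/*
 * Note: since this is the virtual kernel platform, "halting" is
 * emulated: cpu_idle() blocks in umtx_sleep() on the run queue mask
 * with a one second timeout instead of executing hlt, and is presumably
 * woken by a matching umtx_wakeup() when the scheduler posts work.
 */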

#ifdef SMP

/*
 * Called by the LWKT switch core with a critical section held if the only
 * schedulable thread needs the MP lock and we couldn't get it.  On
 * a real cpu we just spin in the scheduler.  In the virtual kernel
 * we sleep for a bit.
 */
void
cpu_mplock_contested(void)
{
	usleep(1000);
}

/*
 * Called by the spinlock code with or without a critical section held
 * when a spinlock is found to be seriously contested.
 *
 * We need to enter a critical section to prevent signals from recursing
 * into pthreads.
 */
void
cpu_spinlock_contested(void)
{
	crit_enter();
	usleep(1000);
	crit_exit();
}

#endif

/*
 * Clear registers on exec
 */
void
exec_setregs(u_long entry, u_long stack, u_long ps_strings)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct pcb *pcb = td->td_pcb;
	struct trapframe *regs = lp->lwp_md.md_regs;

	/* was i386_user_cleanup() in NetBSD */
	user_ldt_free(pcb);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_rip = entry;
	regs->tf_rsp = ((stack - 8) & ~0xFul) + 8; /* align the stack */
	regs->tf_rdi = stack;		/* argv */
	regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_cs = _ucodesel;
	regs->tf_rbx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0; /* JG set bit 10? */
		if (pcb == td->td_pcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * NOTE: do not set CR0_TS here.  npxinit() must do it after clearing
	 *	 gd_npxthread.  Otherwise a preemptive interrupt thread
	 *	 may panic in npxdna().
	 */
	crit_enter();
#if 0
	load_cr0(rcr0() | CR0_MP);
#endif

	/*
	 * NOTE: The MSR values must be correct so we can return to
	 *	 userland.  gd_user_fs/gs must be correct so the switch
	 *	 code knows what the current MSR values are.
	 */
	pcb->pcb_fsbase = 0;	/* Values loaded from PCB on switch */
	pcb->pcb_gsbase = 0;
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
	crit_exit();

	/*
	 * note: linux emulator needs edx to be 0x0 on entry, which is
	 * handled in execve simply by setting the 64 bit syscall
	 * return value to 0.
	 */
}

void
cpu_setregs(void)
{
#if 0
	unsigned int cr0;

	cr0 = rcr0();
	cr0 |= CR0_NE;			/* Done by npxinit() */
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
	cr0 |= CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
#endif
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
	    req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

extern u_long bootdev;		/* not a cdev_t - encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
	CTLFLAG_RD, &bootdev, 0, "Boot device (not in cdev_t format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

extern struct user *proc0paddr;

#if 0

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(dblfault),
	IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
#endif

#ifdef DEBUG_INTERRUPTS
extern inthand_t *Xrsvdary[256];
#endif

int
ptrace_set_pc(struct lwp *lp, unsigned long addr)
{
	lp->lwp_md.md_regs->tf_rip = addr;
	return (0);
}

int
ptrace_single_step(struct lwp *lp)
{
	lp->lwp_md.md_regs->tf_rflags |= PSL_T;
	return (0);
}

int
fill_regs(struct lwp *lp, struct reg *regs)
{
	struct trapframe *tp;

	tp = lp->lwp_md.md_regs;
	bcopy(&tp->tf_rdi, &regs->r_rdi, sizeof(*regs));
	return (0);
}

int
set_regs(struct lwp *lp, struct reg *regs)
{
	struct trapframe *tp;

	tp = lp->lwp_md.md_regs;
	if (!EFL_SECURE(regs->r_rflags, tp->tf_rflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	bcopy(&regs->r_rdi, &tp->tf_rdi, sizeof(*regs));
	return (0);
}

#ifndef CPU_DISABLE_SSE
static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;

	sv_87->sv_ex_sw = sv_xmm->sv_ex_sw;
}

static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];

	sv_xmm->sv_ex_sw = sv_87->sv_ex_sw;
}
#endif /* CPU_DISABLE_SSE */
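
/*
 * fill_fpregs()/set_fpregs() export and import FPU state for ptrace
 * and core dumps.  On fxsr-capable cpus the pcb holds the FXSAVE image,
 * so the converters above translate between it and the legacy save87
 * layout that struct fpreg expects.
 */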

int
fill_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
#ifndef CPU_DISABLE_SSE
	if (cpu_fxsr) {
		fill_fpregs_xmm(&lp->lwp_thread->td_pcb->pcb_save.sv_xmm,
				(struct save87 *)fpregs);
		return (0);
	}
#endif /* CPU_DISABLE_SSE */
	bcopy(&lp->lwp_thread->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
#ifndef CPU_DISABLE_SSE
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
			       &lp->lwp_thread->td_pcb->pcb_save.sv_xmm);
		return (0);
	}
#endif /* CPU_DISABLE_SSE */
	bcopy(fpregs, &lp->lwp_thread->td_pcb->pcb_save.sv_87, sizeof *fpregs);
	return (0);
}

int
fill_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
	return (ENOSYS);
}

int
set_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
	return (ENOSYS);
}

#if 0
/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all GE and LE bits in the dr7 register are zero,
		 * thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * at least one of the breakpoints was hit, check to see
	 * which ones and if any of them are user space addresses
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAX_USER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}

#endif

/*
 * Minimal cpu identification: cpuid leaf 1 returns the standard feature
 * flags in %edx, which do_cpuid() places in regs[3].
 */
void
identcpu(void)
{
	int regs[4];

	do_cpuid(1, regs);
	cpu_feature = regs[3];
}

#ifndef DDB
void
Debugger(const char *msg)
{
	kprintf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */