1 /*- 2 * Copyright (c) 1992 Terrence R. Lambert. 3 * Copyright (C) 1994, David Greenman 4 * Copyright (c) 1982, 1987, 1990, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * William Jolitz. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 39 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $ 40 */ 41 42 #include "opt_compat.h" 43 #include "opt_ddb.h" 44 #include "opt_directio.h" 45 #include "opt_inet.h" 46 #include "opt_ipx.h" 47 #include "opt_msgbuf.h" 48 #include "opt_swap.h" 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/sysproto.h> 53 #include <sys/signalvar.h> 54 #include <sys/kernel.h> 55 #include <sys/linker.h> 56 #include <sys/malloc.h> 57 #include <sys/proc.h> 58 #include <sys/buf.h> 59 #include <sys/reboot.h> 60 #include <sys/mbuf.h> 61 #include <sys/msgbuf.h> 62 #include <sys/sysent.h> 63 #include <sys/sysctl.h> 64 #include <sys/vmmeter.h> 65 #include <sys/bus.h> 66 #include <sys/upcall.h> 67 #include <sys/usched.h> 68 #include <sys/reg.h> 69 70 #include <vm/vm.h> 71 #include <vm/vm_param.h> 72 #include <sys/lock.h> 73 #include <vm/vm_kern.h> 74 #include <vm/vm_object.h> 75 #include <vm/vm_page.h> 76 #include <vm/vm_map.h> 77 #include <vm/vm_pager.h> 78 #include <vm/vm_extern.h> 79 80 #include <sys/thread2.h> 81 #include <sys/mplock2.h> 82 83 #include <sys/user.h> 84 #include <sys/exec.h> 85 #include <sys/cons.h> 86 87 #include <ddb/ddb.h> 88 89 #include <machine/cpu.h> 90 #include <machine/clock.h> 91 #include <machine/specialreg.h> 92 #include <machine/md_var.h> 93 #include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */ 94 #include <machine/globaldata.h> /* CPU_prvspace */ 95 #include <machine/smp.h> 96 #ifdef PERFMON 97 #include <machine/perfmon.h> 98 #endif 99 #include <machine/cputypes.h> 100 101 #include <bus/isa/rtc.h> 102 #include <sys/random.h> 103 #include <sys/ptrace.h> 104 #include <machine/sigframe.h> 105 #include <unistd.h> /* umtx_* functions */ 106 #include <pthread.h> /* pthread_yield() */ 107 108 extern void dblfault_handler (void); 109 110 #ifndef CPU_DISABLE_SSE 111 static void set_fpregs_xmm (struct save87 *, struct savexmm *); 112 static void fill_fpregs_xmm (struct savexmm *, struct save87 *); 113 #endif /* CPU_DISABLE_SSE */ 114 #ifdef DIRECTIO 115 extern void ffs_rawread_setup(void); 116 #endif /* DIRECTIO */ 117 118 #ifdef SMP 119 int64_t tsc_offsets[MAXCPU]; 120 #else 121 int64_t tsc_offsets[1]; 122 #endif 123 124 #if defined(SWTCH_OPTIM_STATS) 125 extern int swtch_optim_stats; 126 SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats, 127 CTLFLAG_RD, &swtch_optim_stats, 0, ""); 128 SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count, 129 CTLFLAG_RD, &tlb_flush_count, 0, ""); 130 #endif 131 132 static int 133 sysctl_hw_physmem(SYSCTL_HANDLER_ARGS) 134 { 135 u_long pmem = ctob(physmem); 136 137 int error = sysctl_handle_long(oidp, &pmem, 0, req); 138 return (error); 139 } 140 141 SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_ULONG|CTLFLAG_RD, 142 0, 0, sysctl_hw_physmem, "LU", "Total system memory in bytes (number of pages * page size)"); 143 144 static int 145 sysctl_hw_usermem(SYSCTL_HANDLER_ARGS) 146 { 147 /* JG */ 148 int error = sysctl_handle_int(oidp, 0, 149 ctob((int)Maxmem - vmstats.v_wire_count), req); 150 return (error); 151 } 152 153 SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD, 154 0, 0, sysctl_hw_usermem, "IU", ""); 155 156 SYSCTL_ULONG(_hw, OID_AUTO, availpages, CTLFLAG_RD, &Maxmem, 0, ""); 157 158 #if 0 159 160 static int 161 sysctl_machdep_msgbuf(SYSCTL_HANDLER_ARGS) 162 { 163 int error; 164 165 /* Unwind the buffer, so that it's linear (possibly starting with 166 * some initial nulls). 167 */ 168 error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr+msgbufp->msg_bufr, 169 msgbufp->msg_size-msgbufp->msg_bufr,req); 170 if(error) return(error); 171 if(msgbufp->msg_bufr>0) { 172 error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr, 173 msgbufp->msg_bufr,req); 174 } 175 return(error); 176 } 177 178 SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD, 179 0, 0, sysctl_machdep_msgbuf, "A","Contents of kernel message buffer"); 180 181 static int msgbuf_clear; 182 183 static int 184 sysctl_machdep_msgbuf_clear(SYSCTL_HANDLER_ARGS) 185 { 186 int error; 187 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 188 req); 189 if (!error && req->newptr) { 190 /* Clear the buffer and reset write pointer */ 191 bzero(msgbufp->msg_ptr,msgbufp->msg_size); 192 msgbufp->msg_bufr=msgbufp->msg_bufx=0; 193 msgbuf_clear=0; 194 } 195 return (error); 196 } 197 198 SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW, 199 &msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I", 200 "Clear kernel message buffer"); 201 202 #endif 203 204 /* 205 * Send an interrupt to process. 206 * 207 * Stack is set up to allow sigcode stored 208 * at top to call routine, followed by kcall 209 * to sigreturn routine below. After sigreturn 210 * resets the signal mask, the stack, and the 211 * frame pointer, it returns to the user 212 * specified pc, psl. 213 */ 214 void 215 sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) 216 { 217 struct lwp *lp = curthread->td_lwp; 218 struct proc *p = lp->lwp_proc; 219 struct trapframe *regs; 220 struct sigacts *psp = p->p_sigacts; 221 struct sigframe sf, *sfp; 222 int oonstack; 223 char *sp; 224 225 regs = lp->lwp_md.md_regs; 226 oonstack = (lp->lwp_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0; 227 228 /* Save user context */ 229 bzero(&sf, sizeof(struct sigframe)); 230 sf.sf_uc.uc_sigmask = *mask; 231 sf.sf_uc.uc_stack = lp->lwp_sigstk; 232 sf.sf_uc.uc_mcontext.mc_onstack = oonstack; 233 KKASSERT(__offsetof(struct trapframe, tf_rdi) == 0); 234 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(struct trapframe)); 235 236 /* Make the size of the saved context visible to userland */ 237 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); 238 239 /* Allocate and validate space for the signal handler context. */ 240 if ((lp->lwp_flags & LWP_ALTSTACK) != 0 && !oonstack && 241 SIGISMEMBER(psp->ps_sigonstack, sig)) { 242 sp = (char *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size - 243 sizeof(struct sigframe)); 244 lp->lwp_sigstk.ss_flags |= SS_ONSTACK; 245 } else { 246 /* We take red zone into account */ 247 sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128; 248 } 249 250 /* Align to 16 bytes */ 251 sfp = (struct sigframe *)((intptr_t)sp & ~0xFUL); 252 253 /* Translate the signal is appropriate */ 254 if (p->p_sysent->sv_sigtbl) { 255 if (sig <= p->p_sysent->sv_sigsize) 256 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 257 } 258 259 /* 260 * Build the argument list for the signal handler. 261 * 262 * Arguments are in registers (%rdi, %rsi, %rdx, %rcx) 263 */ 264 regs->tf_rdi = sig; /* argument 1 */ 265 regs->tf_rdx = (register_t)&sfp->sf_uc; /* argument 3 */ 266 267 if (SIGISMEMBER(psp->ps_siginfo, sig)) { 268 /* 269 * Signal handler installed with SA_SIGINFO. 270 * 271 * action(signo, siginfo, ucontext) 272 */ 273 regs->tf_rsi = (register_t)&sfp->sf_si; /* argument 2 */ 274 regs->tf_rcx = (register_t)regs->tf_err; /* argument 4 */ 275 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 276 277 /* fill siginfo structure */ 278 sf.sf_si.si_signo = sig; 279 sf.sf_si.si_code = code; 280 sf.sf_si.si_addr = (void *)regs->tf_err; 281 } else { 282 /* 283 * Old FreeBSD-style arguments. 284 * 285 * handler (signo, code, [uc], addr) 286 */ 287 regs->tf_rsi = (register_t)code; /* argument 2 */ 288 regs->tf_rcx = (register_t)regs->tf_err; /* argument 4 */ 289 sf.sf_ahu.sf_handler = catcher; 290 } 291 292 #if 0 293 /* 294 * If we're a vm86 process, we want to save the segment registers. 295 * We also change eflags to be our emulated eflags, not the actual 296 * eflags. 297 */ 298 if (regs->tf_eflags & PSL_VM) { 299 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 300 struct vm86_kernel *vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86; 301 302 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; 303 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; 304 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; 305 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; 306 307 if (vm86->vm86_has_vme == 0) 308 sf.sf_uc.uc_mcontext.mc_eflags = 309 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 310 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 311 312 /* 313 * Clear PSL_NT to inhibit T_TSSFLT faults on return from 314 * syscalls made by the signal handler. This just avoids 315 * wasting time for our lazy fixup of such faults. PSL_NT 316 * does nothing in vm86 mode, but vm86 programs can set it 317 * almost legitimately in probes for old cpu types. 318 */ 319 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP); 320 } 321 #endif 322 323 /* 324 * Save the FPU state and reinit the FP unit 325 */ 326 npxpush(&sf.sf_uc.uc_mcontext); 327 328 /* 329 * Copy the sigframe out to the user's stack. 330 */ 331 if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) { 332 /* 333 * Something is wrong with the stack pointer. 334 * ...Kill the process. 335 */ 336 sigexit(lp, SIGILL); 337 } 338 339 regs->tf_rsp = (register_t)sfp; 340 regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 341 342 /* 343 * i386 abi specifies that the direction flag must be cleared 344 * on function entry 345 */ 346 regs->tf_rflags &= ~(PSL_T|PSL_D); 347 348 /* 349 * 64 bit mode has a code and stack selector but 350 * no data or extra selector. %fs and %gs are not 351 * stored in-context. 352 */ 353 regs->tf_cs = _ucodesel; 354 regs->tf_ss = _udatasel; 355 } 356 357 /* 358 * Sanitize the trapframe for a virtual kernel passing control to a custom 359 * VM context. Remove any items that would otherwise create a privilage 360 * issue. 361 * 362 * XXX at the moment we allow userland to set the resume flag. Is this a 363 * bad idea? 364 */ 365 int 366 cpu_sanitize_frame(struct trapframe *frame) 367 { 368 frame->tf_cs = _ucodesel; 369 frame->tf_ss = _udatasel; 370 /* XXX VM (8086) mode not supported? */ 371 frame->tf_rflags &= (PSL_RF | PSL_USERCHANGE | PSL_VM_UNSUPP); 372 frame->tf_rflags |= PSL_RESERVED_DEFAULT | PSL_I; 373 374 return(0); 375 } 376 377 /* 378 * Sanitize the tls so loading the descriptor does not blow up 379 * on us. For x86_64 we don't have to do anything. 380 */ 381 int 382 cpu_sanitize_tls(struct savetls *tls) 383 { 384 return(0); 385 } 386 387 /* 388 * sigreturn(ucontext_t *sigcntxp) 389 * 390 * System call to cleanup state after a signal 391 * has been taken. Reset signal mask and 392 * stack state from context left by sendsig (above). 393 * Return to previous pc and psl as specified by 394 * context left by sendsig. Check carefully to 395 * make sure that the user has not modified the 396 * state to gain improper privileges. 397 */ 398 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 399 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 400 401 int 402 sys_sigreturn(struct sigreturn_args *uap) 403 { 404 struct lwp *lp = curthread->td_lwp; 405 struct trapframe *regs; 406 ucontext_t uc; 407 ucontext_t *ucp; 408 register_t rflags; 409 int cs; 410 int error; 411 412 /* 413 * We have to copy the information into kernel space so userland 414 * can't modify it while we are sniffing it. 415 */ 416 regs = lp->lwp_md.md_regs; 417 error = copyin(uap->sigcntxp, &uc, sizeof(uc)); 418 if (error) 419 return (error); 420 ucp = &uc; 421 rflags = ucp->uc_mcontext.mc_rflags; 422 423 /* VM (8086) mode not supported */ 424 rflags &= ~PSL_VM_UNSUPP; 425 426 #if 0 427 if (eflags & PSL_VM) { 428 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 429 struct vm86_kernel *vm86; 430 431 /* 432 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 433 * set up the vm86 area, and we can't enter vm86 mode. 434 */ 435 if (lp->lwp_thread->td_pcb->pcb_ext == 0) 436 return (EINVAL); 437 vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86; 438 if (vm86->vm86_inited == 0) 439 return (EINVAL); 440 441 /* go back to user mode if both flags are set */ 442 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 443 trapsignal(lp->lwp_proc, SIGBUS, 0); 444 445 if (vm86->vm86_has_vme) { 446 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 447 (eflags & VME_USERCHANGE) | PSL_VM; 448 } else { 449 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 450 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 451 } 452 bcopy(&ucp.uc_mcontext.mc_gs, tf, sizeof(struct trapframe)); 453 tf->tf_eflags = eflags; 454 tf->tf_vm86_ds = tf->tf_ds; 455 tf->tf_vm86_es = tf->tf_es; 456 tf->tf_vm86_fs = tf->tf_fs; 457 tf->tf_vm86_gs = tf->tf_gs; 458 tf->tf_ds = _udatasel; 459 tf->tf_es = _udatasel; 460 #if 0 461 tf->tf_fs = _udatasel; 462 tf->tf_gs = _udatasel; 463 #endif 464 } else 465 #endif 466 { 467 /* 468 * Don't allow users to change privileged or reserved flags. 469 */ 470 /* 471 * XXX do allow users to change the privileged flag PSL_RF. 472 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 473 * should sometimes set it there too. tf_eflags is kept in 474 * the signal context during signal handling and there is no 475 * other place to remember it, so the PSL_RF bit may be 476 * corrupted by the signal handler without us knowing. 477 * Corruption of the PSL_RF bit at worst causes one more or 478 * one less debugger trap, so allowing it is fairly harmless. 479 */ 480 if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) { 481 kprintf("sigreturn: rflags = 0x%lx\n", (long)rflags); 482 return(EINVAL); 483 } 484 485 /* 486 * Don't allow users to load a valid privileged %cs. Let the 487 * hardware check for invalid selectors, excess privilege in 488 * other selectors, invalid %eip's and invalid %esp's. 489 */ 490 cs = ucp->uc_mcontext.mc_cs; 491 if (!CS_SECURE(cs)) { 492 kprintf("sigreturn: cs = 0x%x\n", cs); 493 trapsignal(lp, SIGBUS, T_PROTFLT); 494 return(EINVAL); 495 } 496 bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(struct trapframe)); 497 } 498 499 /* 500 * Restore the FPU state from the frame 501 */ 502 npxpop(&ucp->uc_mcontext); 503 504 if (ucp->uc_mcontext.mc_onstack & 1) 505 lp->lwp_sigstk.ss_flags |= SS_ONSTACK; 506 else 507 lp->lwp_sigstk.ss_flags &= ~SS_ONSTACK; 508 509 lp->lwp_sigmask = ucp->uc_sigmask; 510 SIG_CANTMASK(lp->lwp_sigmask); 511 return(EJUSTRETURN); 512 } 513 514 /* 515 * Stack frame on entry to function. %rax will contain the function vector, 516 * %rcx will contain the function data. flags, rcx, and rax will have 517 * already been pushed on the stack. 518 */ 519 struct upc_frame { 520 register_t rax; 521 register_t rcx; 522 register_t rdx; 523 register_t flags; 524 register_t oldip; 525 }; 526 527 void 528 sendupcall(struct vmupcall *vu, int morepending) 529 { 530 struct lwp *lp = curthread->td_lwp; 531 struct trapframe *regs; 532 struct upcall upcall; 533 struct upc_frame upc_frame; 534 int crit_count = 0; 535 536 /* 537 * If we are a virtual kernel running an emulated user process 538 * context, switch back to the virtual kernel context before 539 * trying to post the signal. 540 */ 541 if (lp->lwp_vkernel && lp->lwp_vkernel->ve) { 542 lp->lwp_md.md_regs->tf_trapno = 0; 543 vkernel_trap(lp, lp->lwp_md.md_regs); 544 } 545 546 /* 547 * Get the upcall data structure 548 */ 549 if (copyin(lp->lwp_upcall, &upcall, sizeof(upcall)) || 550 copyin((char *)upcall.upc_uthread + upcall.upc_critoff, &crit_count, sizeof(int)) 551 ) { 552 vu->vu_pending = 0; 553 kprintf("bad upcall address\n"); 554 return; 555 } 556 557 /* 558 * If the data structure is already marked pending or has a critical 559 * section count, mark the data structure as pending and return 560 * without doing an upcall. vu_pending is left set. 561 */ 562 if (upcall.upc_pending || crit_count >= vu->vu_pending) { 563 if (upcall.upc_pending < vu->vu_pending) { 564 upcall.upc_pending = vu->vu_pending; 565 copyout(&upcall.upc_pending, &lp->lwp_upcall->upc_pending, 566 sizeof(upcall.upc_pending)); 567 } 568 return; 569 } 570 571 /* 572 * We can run this upcall now, clear vu_pending. 573 * 574 * Bump our critical section count and set or clear the 575 * user pending flag depending on whether more upcalls are 576 * pending. The user will be responsible for calling 577 * upc_dispatch(-1) to process remaining upcalls. 578 */ 579 vu->vu_pending = 0; 580 upcall.upc_pending = morepending; 581 ++crit_count; 582 copyout(&upcall.upc_pending, &lp->lwp_upcall->upc_pending, 583 sizeof(upcall.upc_pending)); 584 copyout(&crit_count, (char *)upcall.upc_uthread + upcall.upc_critoff, 585 sizeof(int)); 586 587 /* 588 * Construct a stack frame and issue the upcall 589 */ 590 regs = lp->lwp_md.md_regs; 591 upc_frame.rax = regs->tf_rax; 592 upc_frame.rcx = regs->tf_rcx; 593 upc_frame.rdx = regs->tf_rdx; 594 upc_frame.flags = regs->tf_rflags; 595 upc_frame.oldip = regs->tf_rip; 596 if (copyout(&upc_frame, (void *)(regs->tf_rsp - sizeof(upc_frame)), 597 sizeof(upc_frame)) != 0) { 598 kprintf("bad stack on upcall\n"); 599 } else { 600 regs->tf_rax = (register_t)vu->vu_func; 601 regs->tf_rcx = (register_t)vu->vu_data; 602 regs->tf_rdx = (register_t)lp->lwp_upcall; 603 regs->tf_rip = (register_t)vu->vu_ctx; 604 regs->tf_rsp -= sizeof(upc_frame); 605 } 606 } 607 608 /* 609 * fetchupcall occurs in the context of a system call, which means that 610 * we have to return EJUSTRETURN in order to prevent eax and edx from 611 * being overwritten by the syscall return value. 612 * 613 * if vu is not NULL we return the new context in %edx, the new data in %ecx, 614 * and the function pointer in %eax. 615 */ 616 int 617 fetchupcall(struct vmupcall *vu, int morepending, void *rsp) 618 { 619 struct upc_frame upc_frame; 620 struct lwp *lp = curthread->td_lwp; 621 struct trapframe *regs; 622 int error; 623 struct upcall upcall; 624 int crit_count; 625 626 regs = lp->lwp_md.md_regs; 627 628 error = copyout(&morepending, &lp->lwp_upcall->upc_pending, sizeof(int)); 629 if (error == 0) { 630 if (vu) { 631 /* 632 * This jumps us to the next ready context. 633 */ 634 vu->vu_pending = 0; 635 error = copyin(lp->lwp_upcall, &upcall, sizeof(upcall)); 636 crit_count = 0; 637 if (error == 0) 638 error = copyin((char *)upcall.upc_uthread + upcall.upc_critoff, &crit_count, sizeof(int)); 639 ++crit_count; 640 if (error == 0) 641 error = copyout(&crit_count, (char *)upcall.upc_uthread + upcall.upc_critoff, sizeof(int)); 642 regs->tf_rax = (register_t)vu->vu_func; 643 regs->tf_rcx = (register_t)vu->vu_data; 644 regs->tf_rdx = (register_t)lp->lwp_upcall; 645 regs->tf_rip = (register_t)vu->vu_ctx; 646 regs->tf_rsp = (register_t)rsp; 647 } else { 648 /* 649 * This returns us to the originally interrupted code. 650 */ 651 error = copyin(rsp, &upc_frame, sizeof(upc_frame)); 652 regs->tf_rax = upc_frame.rax; 653 regs->tf_rcx = upc_frame.rcx; 654 regs->tf_rdx = upc_frame.rdx; 655 regs->tf_rflags = (regs->tf_rflags & ~PSL_USERCHANGE) | 656 (upc_frame.flags & PSL_USERCHANGE); 657 regs->tf_rip = upc_frame.oldip; 658 regs->tf_rsp = (register_t)((char *)rsp + sizeof(upc_frame)); 659 } 660 } 661 if (error == 0) 662 error = EJUSTRETURN; 663 return(error); 664 } 665 666 /* 667 * cpu_idle() represents the idle LWKT. You cannot return from this function 668 * (unless you want to blow things up!). Instead we look for runnable threads 669 * and loop or halt as appropriate. Giant is not held on entry to the thread. 670 * 671 * The main loop is entered with a critical section held, we must release 672 * the critical section before doing anything else. lwkt_switch() will 673 * check for pending interrupts due to entering and exiting its own 674 * critical section. 675 * 676 * Note on cpu_idle_hlt: On an SMP system we rely on a scheduler IPI 677 * to wake a HLTed cpu up. 678 */ 679 static int cpu_idle_hlt = 1; 680 static int cpu_idle_hltcnt; 681 static int cpu_idle_spincnt; 682 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, 683 &cpu_idle_hlt, 0, "Idle loop HLT enable"); 684 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hltcnt, CTLFLAG_RW, 685 &cpu_idle_hltcnt, 0, "Idle loop entry halts"); 686 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_spincnt, CTLFLAG_RW, 687 &cpu_idle_spincnt, 0, "Idle loop entry spins"); 688 689 void 690 cpu_idle(void) 691 { 692 struct thread *td = curthread; 693 struct mdglobaldata *gd = mdcpu; 694 int reqflags; 695 696 crit_exit(); 697 KKASSERT(td->td_critcount == 0); 698 cpu_enable_intr(); 699 700 for (;;) { 701 /* 702 * See if there are any LWKTs ready to go. 703 */ 704 lwkt_switch(); 705 706 /* 707 * The idle loop halts only if no threads are scheduleable 708 * and no signals have occured. 709 */ 710 if (cpu_idle_hlt && 711 (td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) { 712 splz(); 713 if ((td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) { 714 #ifdef DEBUGIDLE 715 struct timeval tv1, tv2; 716 gettimeofday(&tv1, NULL); 717 #endif 718 reqflags = gd->mi.gd_reqflags & 719 ~RQF_IDLECHECK_WK_MASK; 720 KKASSERT(gd->mi.gd_processing_ipiq == 0); 721 umtx_sleep(&gd->mi.gd_reqflags, reqflags, 722 1000000); 723 #ifdef DEBUGIDLE 724 gettimeofday(&tv2, NULL); 725 if (tv2.tv_usec - tv1.tv_usec + 726 (tv2.tv_sec - tv1.tv_sec) * 1000000 727 > 500000) { 728 kprintf("cpu %d idlelock %08x %08x\n", 729 gd->mi.gd_cpuid, 730 gd->mi.gd_reqflags, 731 gd->gd_fpending); 732 } 733 #endif 734 } 735 ++cpu_idle_hltcnt; 736 } else { 737 splz(); 738 #ifdef SMP 739 __asm __volatile("pause"); 740 #endif 741 ++cpu_idle_spincnt; 742 } 743 } 744 } 745 746 #ifdef SMP 747 748 /* 749 * Called by the spinlock code with or without a critical section held 750 * when a spinlock is found to be seriously constested. 751 * 752 * We need to enter a critical section to prevent signals from recursing 753 * into pthreads. 754 */ 755 void 756 cpu_spinlock_contested(void) 757 { 758 cpu_pause(); 759 } 760 761 #endif 762 763 /* 764 * Clear registers on exec 765 */ 766 void 767 exec_setregs(u_long entry, u_long stack, u_long ps_strings) 768 { 769 struct thread *td = curthread; 770 struct lwp *lp = td->td_lwp; 771 struct pcb *pcb = td->td_pcb; 772 struct trapframe *regs = lp->lwp_md.md_regs; 773 774 /* was i386_user_cleanup() in NetBSD */ 775 user_ldt_free(pcb); 776 777 bzero((char *)regs, sizeof(struct trapframe)); 778 regs->tf_rip = entry; 779 regs->tf_rsp = ((stack - 8) & ~0xFul) + 8; /* align the stack */ 780 regs->tf_rdi = stack; /* argv */ 781 regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T); 782 regs->tf_ss = _udatasel; 783 regs->tf_cs = _ucodesel; 784 regs->tf_rbx = ps_strings; 785 786 /* 787 * Reset the hardware debug registers if they were in use. 788 * They won't have any meaning for the newly exec'd process. 789 */ 790 if (pcb->pcb_flags & PCB_DBREGS) { 791 pcb->pcb_dr0 = 0; 792 pcb->pcb_dr1 = 0; 793 pcb->pcb_dr2 = 0; 794 pcb->pcb_dr3 = 0; 795 pcb->pcb_dr6 = 0; 796 pcb->pcb_dr7 = 0; /* JG set bit 10? */ 797 if (pcb == td->td_pcb) { 798 /* 799 * Clear the debug registers on the running 800 * CPU, otherwise they will end up affecting 801 * the next process we switch to. 802 */ 803 reset_dbregs(); 804 } 805 pcb->pcb_flags &= ~PCB_DBREGS; 806 } 807 808 /* 809 * Initialize the math emulator (if any) for the current process. 810 * Actually, just clear the bit that says that the emulator has 811 * been initialized. Initialization is delayed until the process 812 * traps to the emulator (if it is done at all) mainly because 813 * emulators don't provide an entry point for initialization. 814 */ 815 pcb->pcb_flags &= ~FP_SOFTFP; 816 817 /* 818 * NOTE: do not set CR0_TS here. npxinit() must do it after clearing 819 * gd_npxthread. Otherwise a preemptive interrupt thread 820 * may panic in npxdna(). 821 */ 822 crit_enter(); 823 #if 0 824 load_cr0(rcr0() | CR0_MP); 825 #endif 826 827 /* 828 * NOTE: The MSR values must be correct so we can return to 829 * userland. gd_user_fs/gs must be correct so the switch 830 * code knows what the current MSR values are. 831 */ 832 pcb->pcb_fsbase = 0; /* Values loaded from PCB on switch */ 833 pcb->pcb_gsbase = 0; 834 /* Initialize the npx (if any) for the current process. */ 835 npxinit(__INITIAL_NPXCW__); 836 crit_exit(); 837 838 /* 839 * note: linux emulator needs edx to be 0x0 on entry, which is 840 * handled in execve simply by setting the 64 bit syscall 841 * return value to 0. 842 */ 843 } 844 845 void 846 cpu_setregs(void) 847 { 848 #if 0 849 unsigned int cr0; 850 851 cr0 = rcr0(); 852 cr0 |= CR0_NE; /* Done by npxinit() */ 853 cr0 |= CR0_MP | CR0_TS; /* Done at every execve() too. */ 854 cr0 |= CR0_WP | CR0_AM; 855 load_cr0(cr0); 856 load_gs(_udatasel); 857 #endif 858 } 859 860 static int 861 sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS) 862 { 863 int error; 864 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 865 req); 866 if (!error && req->newptr) 867 resettodr(); 868 return (error); 869 } 870 871 SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, 872 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); 873 874 extern u_long bootdev; /* not a cdev_t - encoding is different */ 875 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev, 876 CTLFLAG_RD, &bootdev, 0, "Boot device (not in cdev_t format)"); 877 878 /* 879 * Initialize 386 and configure to run kernel 880 */ 881 882 /* 883 * Initialize segments & interrupt table 884 */ 885 886 extern struct user *proc0paddr; 887 888 #if 0 889 890 extern inthand_t 891 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 892 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 893 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 894 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 895 IDTVEC(xmm), IDTVEC(dblfault), 896 IDTVEC(fast_syscall), IDTVEC(fast_syscall32); 897 #endif 898 899 #ifdef DEBUG_INTERRUPTS 900 extern inthand_t *Xrsvdary[256]; 901 #endif 902 903 int 904 ptrace_set_pc(struct lwp *lp, unsigned long addr) 905 { 906 lp->lwp_md.md_regs->tf_rip = addr; 907 return (0); 908 } 909 910 int 911 ptrace_single_step(struct lwp *lp) 912 { 913 lp->lwp_md.md_regs->tf_rflags |= PSL_T; 914 return (0); 915 } 916 917 int 918 fill_regs(struct lwp *lp, struct reg *regs) 919 { 920 struct trapframe *tp; 921 922 if ((tp = lp->lwp_md.md_regs) == NULL) 923 return EINVAL; 924 bcopy(&tp->tf_rdi, ®s->r_rdi, sizeof(*regs)); 925 return (0); 926 } 927 928 int 929 set_regs(struct lwp *lp, struct reg *regs) 930 { 931 struct trapframe *tp; 932 933 tp = lp->lwp_md.md_regs; 934 if (!EFL_SECURE(regs->r_rflags, tp->tf_rflags) || 935 !CS_SECURE(regs->r_cs)) 936 return (EINVAL); 937 bcopy(®s->r_rdi, &tp->tf_rdi, sizeof(*regs)); 938 return (0); 939 } 940 941 #ifndef CPU_DISABLE_SSE 942 static void 943 fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87) 944 { 945 struct env87 *penv_87 = &sv_87->sv_env; 946 struct envxmm *penv_xmm = &sv_xmm->sv_env; 947 int i; 948 949 /* FPU control/status */ 950 penv_87->en_cw = penv_xmm->en_cw; 951 penv_87->en_sw = penv_xmm->en_sw; 952 penv_87->en_tw = penv_xmm->en_tw; 953 penv_87->en_fip = penv_xmm->en_fip; 954 penv_87->en_fcs = penv_xmm->en_fcs; 955 penv_87->en_opcode = penv_xmm->en_opcode; 956 penv_87->en_foo = penv_xmm->en_foo; 957 penv_87->en_fos = penv_xmm->en_fos; 958 959 /* FPU registers */ 960 for (i = 0; i < 8; ++i) 961 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc; 962 } 963 964 static void 965 set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm) 966 { 967 struct env87 *penv_87 = &sv_87->sv_env; 968 struct envxmm *penv_xmm = &sv_xmm->sv_env; 969 int i; 970 971 /* FPU control/status */ 972 penv_xmm->en_cw = penv_87->en_cw; 973 penv_xmm->en_sw = penv_87->en_sw; 974 penv_xmm->en_tw = penv_87->en_tw; 975 penv_xmm->en_fip = penv_87->en_fip; 976 penv_xmm->en_fcs = penv_87->en_fcs; 977 penv_xmm->en_opcode = penv_87->en_opcode; 978 penv_xmm->en_foo = penv_87->en_foo; 979 penv_xmm->en_fos = penv_87->en_fos; 980 981 /* FPU registers */ 982 for (i = 0; i < 8; ++i) 983 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i]; 984 } 985 #endif /* CPU_DISABLE_SSE */ 986 987 int 988 fill_fpregs(struct lwp *lp, struct fpreg *fpregs) 989 { 990 if (lp->lwp_thread == NULL || lp->lwp_thread->td_pcb == NULL) 991 return EINVAL; 992 #ifndef CPU_DISABLE_SSE 993 if (cpu_fxsr) { 994 fill_fpregs_xmm(&lp->lwp_thread->td_pcb->pcb_save.sv_xmm, 995 (struct save87 *)fpregs); 996 return (0); 997 } 998 #endif /* CPU_DISABLE_SSE */ 999 bcopy(&lp->lwp_thread->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs); 1000 return (0); 1001 } 1002 1003 int 1004 set_fpregs(struct lwp *lp, struct fpreg *fpregs) 1005 { 1006 #ifndef CPU_DISABLE_SSE 1007 if (cpu_fxsr) { 1008 set_fpregs_xmm((struct save87 *)fpregs, 1009 &lp->lwp_thread->td_pcb->pcb_save.sv_xmm); 1010 return (0); 1011 } 1012 #endif /* CPU_DISABLE_SSE */ 1013 bcopy(fpregs, &lp->lwp_thread->td_pcb->pcb_save.sv_87, sizeof *fpregs); 1014 return (0); 1015 } 1016 1017 int 1018 fill_dbregs(struct lwp *lp, struct dbreg *dbregs) 1019 { 1020 return (ENOSYS); 1021 } 1022 1023 int 1024 set_dbregs(struct lwp *lp, struct dbreg *dbregs) 1025 { 1026 return (ENOSYS); 1027 } 1028 1029 #if 0 1030 /* 1031 * Return > 0 if a hardware breakpoint has been hit, and the 1032 * breakpoint was in user space. Return 0, otherwise. 1033 */ 1034 int 1035 user_dbreg_trap(void) 1036 { 1037 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */ 1038 u_int32_t bp; /* breakpoint bits extracted from dr6 */ 1039 int nbp; /* number of breakpoints that triggered */ 1040 caddr_t addr[4]; /* breakpoint addresses */ 1041 int i; 1042 1043 dr7 = rdr7(); 1044 if ((dr7 & 0x000000ff) == 0) { 1045 /* 1046 * all GE and LE bits in the dr7 register are zero, 1047 * thus the trap couldn't have been caused by the 1048 * hardware debug registers 1049 */ 1050 return 0; 1051 } 1052 1053 nbp = 0; 1054 dr6 = rdr6(); 1055 bp = dr6 & 0x0000000f; 1056 1057 if (!bp) { 1058 /* 1059 * None of the breakpoint bits are set meaning this 1060 * trap was not caused by any of the debug registers 1061 */ 1062 return 0; 1063 } 1064 1065 /* 1066 * at least one of the breakpoints were hit, check to see 1067 * which ones and if any of them are user space addresses 1068 */ 1069 1070 if (bp & 0x01) { 1071 addr[nbp++] = (caddr_t)rdr0(); 1072 } 1073 if (bp & 0x02) { 1074 addr[nbp++] = (caddr_t)rdr1(); 1075 } 1076 if (bp & 0x04) { 1077 addr[nbp++] = (caddr_t)rdr2(); 1078 } 1079 if (bp & 0x08) { 1080 addr[nbp++] = (caddr_t)rdr3(); 1081 } 1082 1083 for (i=0; i<nbp; i++) { 1084 if (addr[i] < 1085 (caddr_t)VM_MAX_USER_ADDRESS) { 1086 /* 1087 * addr[i] is in user space 1088 */ 1089 return nbp; 1090 } 1091 } 1092 1093 /* 1094 * None of the breakpoints are in user space. 1095 */ 1096 return 0; 1097 } 1098 1099 #endif 1100 1101 void 1102 identcpu(void) 1103 { 1104 int regs[4]; 1105 1106 do_cpuid(1, regs); 1107 cpu_feature = regs[3]; 1108 } 1109 1110 1111 #ifndef DDB 1112 void 1113 Debugger(const char *msg) 1114 { 1115 kprintf("Debugger(\"%s\") called.\n", msg); 1116 } 1117 #endif /* no DDB */ 1118