/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1982, 1987, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $
 */
#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_directio.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_msgbuf.h"
#include "opt_swap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/usched.h>
#include <sys/reg.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <machine/cpu.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/globaldata.h>		/* CPU_prvspace */
#include <machine/smp.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <machine/cputypes.h>

#include <bus/isa/rtc.h>
#include <sys/random.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>
#include <unistd.h>		/* umtx_* functions */
#include <pthread.h>		/* pthread_yield() */

extern void dblfault_handler (void);

#ifndef CPU_DISABLE_SSE
static void set_fpregs_xmm (struct save87 *, struct savexmm *);
static void fill_fpregs_xmm (struct savexmm *, struct save87 *);
#endif /* CPU_DISABLE_SSE */
#ifdef DIRECTIO
extern void ffs_rawread_setup(void);
#endif /* DIRECTIO */

int64_t tsc_offsets[MAXCPU];

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
        CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
        CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

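/*
 * Sysctl handler for hw.physmem: report total physical memory in bytes
 * (the physmem page count converted with ctob()).
 */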
static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
        u_long pmem = ctob(physmem);

        int error = sysctl_handle_long(oidp, &pmem, 0, req);
        return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_ULONG|CTLFLAG_RD,
        0, 0, sysctl_hw_physmem, "LU",
        "Total system memory in bytes (number of pages * page size)");

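/*
 * Sysctl handler for hw.usermem: report the memory not wired down by the
 * kernel, i.e. ctob(Maxmem - wired page count), in bytes.
 */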
static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
        /* JG */
        int error = sysctl_handle_int(oidp, 0,
                ctob((int)Maxmem - vmstats.v_wire_count), req);
        return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
        0, 0, sysctl_hw_usermem, "IU", "");

SYSCTL_ULONG(_hw, OID_AUTO, availpages, CTLFLAG_RD, &Maxmem, 0, "");

#if 0

static int
sysctl_machdep_msgbuf(SYSCTL_HANDLER_ARGS)
{
        int error;

        /*
         * Unwind the buffer, so that it's linear (possibly starting with
         * some initial nulls).
         */
        error = sysctl_handle_opaque(oidp,
            msgbufp->msg_ptr + msgbufp->msg_bufr,
            msgbufp->msg_size - msgbufp->msg_bufr, req);
        if (error)
                return (error);
        if (msgbufp->msg_bufr > 0) {
                error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr,
                    msgbufp->msg_bufr, req);
        }
        return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD,
        0, 0, sysctl_machdep_msgbuf, "A", "Contents of kernel message buffer");

static int msgbuf_clear;

static int
sysctl_machdep_msgbuf_clear(SYSCTL_HANDLER_ARGS)
{
        int error;

        error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
        if (!error && req->newptr) {
                /* Clear the buffer and reset write pointer */
                bzero(msgbufp->msg_ptr, msgbufp->msg_size);
                msgbufp->msg_bufr = msgbufp->msg_bufx = 0;
                msgbuf_clear = 0;
        }
        return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW,
        &msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I",
        "Clear kernel message buffer");

#endif

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p = lp->lwp_proc;
        struct trapframe *regs;
        struct sigacts *psp = p->p_sigacts;
        struct sigframe sf, *sfp;
        int oonstack;
        char *sp;

        regs = lp->lwp_md.md_regs;
        oonstack = (lp->lwp_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0;

        /* Save user context */
        bzero(&sf, sizeof(struct sigframe));
        sf.sf_uc.uc_sigmask = *mask;
        sf.sf_uc.uc_stack = lp->lwp_sigstk;
        sf.sf_uc.uc_mcontext.mc_onstack = oonstack;
        KKASSERT(__offsetof(struct trapframe, tf_rdi) == 0);
        bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(struct trapframe));

        /* Make the size of the saved context visible to userland */
        sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext);

        /* Allocate and validate space for the signal handler context. */
        if ((lp->lwp_flags & LWP_ALTSTACK) != 0 && !oonstack &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                sp = (char *)(lp->lwp_sigstk.ss_sp + lp->lwp_sigstk.ss_size -
                              sizeof(struct sigframe));
                lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
        } else {
                /* We take red zone into account */
                sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
        }

        /* Align to 16 bytes */
        sfp = (struct sigframe *)((intptr_t)sp & ~0xFUL);

        /* Translate the signal if appropriate */
        if (p->p_sysent->sv_sigtbl) {
                if (sig <= p->p_sysent->sv_sigsize)
                        sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
        }

        /*
         * Build the argument list for the signal handler.
         *
         * Arguments are in registers (%rdi, %rsi, %rdx, %rcx)
         */
        regs->tf_rdi = sig;                             /* argument 1 */
        regs->tf_rdx = (register_t)&sfp->sf_uc;         /* argument 3 */

        if (SIGISMEMBER(psp->ps_siginfo, sig)) {
                /*
                 * Signal handler installed with SA_SIGINFO.
                 *
                 * action(signo, siginfo, ucontext)
                 */
                regs->tf_rsi = (register_t)&sfp->sf_si; /* argument 2 */
                regs->tf_rcx = (register_t)regs->tf_err; /* argument 4 */
                sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

                /* fill siginfo structure */
                sf.sf_si.si_signo = sig;
                sf.sf_si.si_code = code;
                sf.sf_si.si_addr = (void *)regs->tf_addr;
        } else {
                /*
                 * Old FreeBSD-style arguments.
                 *
                 * handler (signo, code, [uc], addr)
                 */
                regs->tf_rsi = (register_t)code;        /* argument 2 */
                regs->tf_rcx = (register_t)regs->tf_addr; /* argument 4 */
                sf.sf_ahu.sf_handler = catcher;
        }

#if 0
        /*
         * If we're a vm86 process, we want to save the segment registers.
         * We also change eflags to be our emulated eflags, not the actual
         * eflags.
         */
        if (regs->tf_eflags & PSL_VM) {
                struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
                struct vm86_kernel *vm86 =
                        &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;

                sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
                sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
                sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
                sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

                if (vm86->vm86_has_vme == 0)
                        sf.sf_uc.uc_mcontext.mc_eflags =
                            (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
                            (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

                /*
                 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
                 * syscalls made by the signal handler.  This just avoids
                 * wasting time for our lazy fixup of such faults.  PSL_NT
                 * does nothing in vm86 mode, but vm86 programs can set it
                 * almost legitimately in probes for old cpu types.
                 */
                tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
        }
#endif

        /*
         * Save the FPU state and reinit the FP unit
         */
        npxpush(&sf.sf_uc.uc_mcontext);

        /*
         * Copy the sigframe out to the user's stack.
         */
        if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
                /*
                 * Something is wrong with the stack pointer.
                 * ...Kill the process.
                 */
                sigexit(lp, SIGILL);
        }

        regs->tf_rsp = (register_t)sfp;
        regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);

        /*
         * i386 abi specifies that the direction flag must be cleared
         * on function entry
         */
        regs->tf_rflags &= ~(PSL_T|PSL_D);

        /*
         * 64 bit mode has a code and stack selector but
         * no data or extra selector.  %fs and %gs are not
         * stored in-context.
         */
        regs->tf_cs = _ucodesel;
        regs->tf_ss = _udatasel;
}

/*
 * Sanitize the trapframe for a virtual kernel passing control to a custom
 * VM context.  Remove any items that would otherwise create a privilege
 * issue.
 *
 * XXX at the moment we allow userland to set the resume flag.  Is this a
 * bad idea?
 */
int
cpu_sanitize_frame(struct trapframe *frame)
{
        frame->tf_cs = _ucodesel;
        frame->tf_ss = _udatasel;
        /* XXX VM (8086) mode not supported? */
        frame->tf_rflags &= (PSL_RF | PSL_USERCHANGE | PSL_VM_UNSUPP);
        frame->tf_rflags |= PSL_RESERVED_DEFAULT | PSL_I;

        return(0);
}

/*
 * Sanitize the tls so loading the descriptor does not blow up
 * on us.  For x86_64 we don't have to do anything.
 */
int
cpu_sanitize_tls(struct savetls *tls)
{
        return(0);
}

/*
 * sigreturn(ucontext_t *sigcntxp)
 *
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
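/*
 * EFL_SECURE() is true when the new rflags value differs from the old one
 * only in bits userland is permitted to change (PSL_USERCHANGE).
 * CS_SECURE() is true when the %cs selector is a user-privilege selector.
 */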
#define EFL_SECURE(ef, oef)     ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
#define CS_SECURE(cs)           (ISPL(cs) == SEL_UPL)

int
sys_sigreturn(struct sigreturn_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct trapframe *regs;
        ucontext_t uc;
        ucontext_t *ucp;
        register_t rflags;
        int cs;
        int error;

        /*
         * We have to copy the information into kernel space so userland
         * can't modify it while we are sniffing it.
         */
        regs = lp->lwp_md.md_regs;
        error = copyin(uap->sigcntxp, &uc, sizeof(uc));
        if (error)
                return (error);
        ucp = &uc;
        rflags = ucp->uc_mcontext.mc_rflags;

        /* VM (8086) mode not supported */
        rflags &= ~PSL_VM_UNSUPP;

#if 0
        if (eflags & PSL_VM) {
                struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
                struct vm86_kernel *vm86;

                /*
                 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
                 * set up the vm86 area, and we can't enter vm86 mode.
                 */
                if (lp->lwp_thread->td_pcb->pcb_ext == 0)
                        return (EINVAL);
                vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;
                if (vm86->vm86_inited == 0)
                        return (EINVAL);

                /* go back to user mode if both flags are set */
                if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
                        trapsignal(lp->lwp_proc, SIGBUS, 0);

                if (vm86->vm86_has_vme) {
                        eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
                            (eflags & VME_USERCHANGE) | PSL_VM;
                } else {
                        vm86->vm86_eflags = eflags;     /* save VIF, VIP */
                        eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
                            (eflags & VM_USERCHANGE) | PSL_VM;
                }
                bcopy(&ucp->uc_mcontext.mc_gs, tf, sizeof(struct trapframe));
                tf->tf_eflags = eflags;
                tf->tf_vm86_ds = tf->tf_ds;
                tf->tf_vm86_es = tf->tf_es;
                tf->tf_vm86_fs = tf->tf_fs;
                tf->tf_vm86_gs = tf->tf_gs;
                tf->tf_ds = _udatasel;
                tf->tf_es = _udatasel;
#if 0
                tf->tf_fs = _udatasel;
                tf->tf_gs = _udatasel;
#endif
        } else
#endif
        {
                /*
                 * Don't allow users to change privileged or reserved flags.
                 */
                /*
                 * XXX do allow users to change the privileged flag PSL_RF.
                 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
                 * should sometimes set it there too.  tf_eflags is kept in
                 * the signal context during signal handling and there is no
                 * other place to remember it, so the PSL_RF bit may be
                 * corrupted by the signal handler without us knowing.
                 * Corruption of the PSL_RF bit at worst causes one more or
                 * one less debugger trap, so allowing it is fairly harmless.
                 */
                if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
                        kprintf("sigreturn: rflags = 0x%lx\n", (long)rflags);
                        return(EINVAL);
                }

                /*
                 * Don't allow users to load a valid privileged %cs.  Let the
                 * hardware check for invalid selectors, excess privilege in
                 * other selectors, invalid %eip's and invalid %esp's.
                 */
                cs = ucp->uc_mcontext.mc_cs;
                if (!CS_SECURE(cs)) {
                        kprintf("sigreturn: cs = 0x%x\n", cs);
                        trapsignal(lp, SIGBUS, T_PROTFLT);
                        return(EINVAL);
                }
                bcopy(&ucp->uc_mcontext.mc_rdi, regs,
                    sizeof(struct trapframe));
        }

        /*
         * Restore the FPU state from the frame
         */
        npxpop(&ucp->uc_mcontext);

        if (ucp->uc_mcontext.mc_onstack & 1)
                lp->lwp_sigstk.ss_flags |= SS_ONSTACK;
        else
                lp->lwp_sigstk.ss_flags &= ~SS_ONSTACK;

        lp->lwp_sigmask = ucp->uc_sigmask;
        SIG_CANTMASK(lp->lwp_sigmask);
        return(EJUSTRETURN);
}

/*
 * cpu_idle() represents the idle LWKT.  You cannot return from this function
 * (unless you want to blow things up!).  Instead we look for runnable threads
 * and loop or halt as appropriate.  Giant is not held on entry to the thread.
 *
 * The main loop is entered with a critical section held, we must release
 * the critical section before doing anything else.  lwkt_switch() will
 * check for pending interrupts due to entering and exiting its own
 * critical section.
 *
 * Note on cpu_idle_hlt:  On an SMP system we rely on a scheduler IPI
 * to wake a HLTed cpu up.
 */
static int      cpu_idle_hlt = 1;
static int      cpu_idle_hltcnt;
static int      cpu_idle_spincnt;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hltcnt, CTLFLAG_RW,
    &cpu_idle_hltcnt, 0, "Idle loop entry halts");
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_spincnt, CTLFLAG_RW,
    &cpu_idle_spincnt, 0, "Idle loop entry spins");

void
cpu_idle(void)
{
        struct thread *td = curthread;
        struct mdglobaldata *gd = mdcpu;
        int reqflags;

        crit_exit();
        KKASSERT(td->td_critcount == 0);
        cpu_enable_intr();

        for (;;) {
                /*
                 * See if there are any LWKTs ready to go.
                 */
                lwkt_switch();

                /*
                 * The idle loop halts only if no threads are schedulable
                 * and no signals have occurred.
                 */
                if (cpu_idle_hlt &&
                    (td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
                        splz();
                        if ((td->td_gd->gd_reqflags &
                             RQF_IDLECHECK_WK_MASK) == 0) {
#ifdef DEBUGIDLE
                                struct timeval tv1, tv2;
                                gettimeofday(&tv1, NULL);
#endif
                                reqflags = gd->mi.gd_reqflags &
                                           ~RQF_IDLECHECK_WK_MASK;
                                KKASSERT(gd->mi.gd_processing_ipiq == 0);
                                umtx_sleep(&gd->mi.gd_reqflags, reqflags,
                                           1000000);
#ifdef DEBUGIDLE
                                gettimeofday(&tv2, NULL);
                                if (tv2.tv_usec - tv1.tv_usec +
                                    (tv2.tv_sec - tv1.tv_sec) * 1000000
                                    > 500000) {
                                        kprintf("cpu %d idlelock %08x %08x\n",
                                                gd->mi.gd_cpuid,
                                                gd->mi.gd_reqflags,
                                                gd->gd_fpending);
                                }
#endif
                        }
                        ++cpu_idle_hltcnt;
                } else {
                        splz();
                        __asm __volatile("pause");
                        ++cpu_idle_spincnt;
                }
        }
}

/*
 * Called by the spinlock code with or without a critical section held
 * when a spinlock is found to be seriously contested.
 *
 * We need to enter a critical section to prevent signals from recursing
 * into pthreads.
 */
void
cpu_spinlock_contested(void)
{
        cpu_pause();
}

/*
 * Clear registers on exec
 */
void
exec_setregs(u_long entry, u_long stack, u_long ps_strings)
{
        struct thread *td = curthread;
        struct lwp *lp = td->td_lwp;
        struct pcb *pcb = td->td_pcb;
        struct trapframe *regs = lp->lwp_md.md_regs;

        /* was i386_user_cleanup() in NetBSD */
        user_ldt_free(pcb);

        bzero((char *)regs, sizeof(struct trapframe));
        regs->tf_rip = entry;
        regs->tf_rsp = ((stack - 8) & ~0xFul) + 8; /* align the stack */
        regs->tf_rdi = stack;           /* argv */
        regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
        regs->tf_ss = _udatasel;
        regs->tf_cs = _ucodesel;
        regs->tf_rbx = ps_strings;

        /*
         * Reset the hardware debug registers if they were in use.
         * They won't have any meaning for the newly exec'd process.
         */
        if (pcb->pcb_flags & PCB_DBREGS) {
                pcb->pcb_dr0 = 0;
                pcb->pcb_dr1 = 0;
                pcb->pcb_dr2 = 0;
                pcb->pcb_dr3 = 0;
                pcb->pcb_dr6 = 0;
                pcb->pcb_dr7 = 0; /* JG set bit 10? */
                if (pcb == td->td_pcb) {
                        /*
                         * Clear the debug registers on the running
                         * CPU, otherwise they will end up affecting
                         * the next process we switch to.
                         */
                        reset_dbregs();
                }
                pcb->pcb_flags &= ~PCB_DBREGS;
        }

        /*
         * Initialize the math emulator (if any) for the current process.
         * Actually, just clear the bit that says that the emulator has
         * been initialized.  Initialization is delayed until the process
         * traps to the emulator (if it is done at all) mainly because
         * emulators don't provide an entry point for initialization.
         */
        pcb->pcb_flags &= ~FP_SOFTFP;

        /*
         * NOTE: do not set CR0_TS here.  npxinit() must do it after clearing
         *       gd_npxthread.  Otherwise a preemptive interrupt thread
         *       may panic in npxdna().
         */
        crit_enter();
#if 0
        load_cr0(rcr0() | CR0_MP);
#endif

        /*
         * NOTE: The MSR values must be correct so we can return to
         *       userland.  gd_user_fs/gs must be correct so the switch
         *       code knows what the current MSR values are.
         */
        pcb->pcb_fsbase = 0;    /* Values loaded from PCB on switch */
        pcb->pcb_gsbase = 0;
        /* Initialize the npx (if any) for the current process. */
        npxinit(__INITIAL_FPUCW__);
        crit_exit();

        /*
         * note: linux emulator needs edx to be 0x0 on entry, which is
         * handled in execve simply by setting the 64 bit syscall
         * return value to 0.
         */
}

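/*
 * cpu_setregs() is effectively a stub here; the CR0 setup it would
 * normally perform is disabled below (#if 0).
 */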
void
cpu_setregs(void)
{
#if 0
        unsigned int cr0;

        cr0 = rcr0();
        cr0 |= CR0_NE;                  /* Done by npxinit() */
        cr0 |= CR0_MP | CR0_TS;         /* Done at every execve() too. */
        cr0 |= CR0_WP | CR0_AM;
        load_cr0(cr0);
        load_gs(_udatasel);
#endif
}

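/*
 * Sysctl handler for machdep.adjkerntz: on a successful write of a new
 * value, resynchronize the real-time clock via resettodr().
 */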
static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
        int error;

        error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
        if (!error && req->newptr)
                resettodr();
        return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
        &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

extern u_long bootdev;          /* not a cdev_t - encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
        CTLFLAG_RD, &bootdev, 0, "Boot device (not in cdev_t format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

extern struct user *proc0paddr;

#if 0

extern inthand_t
        IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
        IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
        IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
        IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
        IDTVEC(xmm), IDTVEC(dblfault),
        IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
#endif

#ifdef DEBUG_INTERRUPTS
extern inthand_t *Xrsvdary[256];
#endif

int
ptrace_set_pc(struct lwp *lp, unsigned long addr)
{
        lp->lwp_md.md_regs->tf_rip = addr;
        return (0);
}

int
ptrace_single_step(struct lwp *lp)
{
        lp->lwp_md.md_regs->tf_rflags |= PSL_T;
        return (0);
}

int
fill_regs(struct lwp *lp, struct reg *regs)
{
        struct trapframe *tp;

        if ((tp = lp->lwp_md.md_regs) == NULL)
                return EINVAL;
        bcopy(&tp->tf_rdi, &regs->r_rdi, sizeof(*regs));
        return (0);
}

int
set_regs(struct lwp *lp, struct reg *regs)
{
        struct trapframe *tp;

        tp = lp->lwp_md.md_regs;
        if (!EFL_SECURE(regs->r_rflags, tp->tf_rflags) ||
            !CS_SECURE(regs->r_cs))
                return (EINVAL);
        bcopy(&regs->r_rdi, &tp->tf_rdi, sizeof(*regs));
        return (0);
}

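/*
 * Conversion helpers between the FXSAVE (savexmm) and legacy 8087 (save87)
 * FPU save formats, used by fill_fpregs()/set_fpregs() when cpu_fxsr is set.
 */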
#ifndef CPU_DISABLE_SSE
static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
        struct env87 *penv_87 = &sv_87->sv_env;
        struct envxmm *penv_xmm = &sv_xmm->sv_env;
        int i;

        /* FPU control/status */
        penv_87->en_cw = penv_xmm->en_cw;
        penv_87->en_sw = penv_xmm->en_sw;
        penv_87->en_tw = penv_xmm->en_tw;
        penv_87->en_fip = penv_xmm->en_fip;
        penv_87->en_fcs = penv_xmm->en_fcs;
        penv_87->en_opcode = penv_xmm->en_opcode;
        penv_87->en_foo = penv_xmm->en_foo;
        penv_87->en_fos = penv_xmm->en_fos;

        /* FPU registers */
        for (i = 0; i < 8; ++i)
                sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
}

static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
        struct env87 *penv_87 = &sv_87->sv_env;
        struct envxmm *penv_xmm = &sv_xmm->sv_env;
        int i;

        /* FPU control/status */
        penv_xmm->en_cw = penv_87->en_cw;
        penv_xmm->en_sw = penv_87->en_sw;
        penv_xmm->en_tw = penv_87->en_tw;
        penv_xmm->en_fip = penv_87->en_fip;
        penv_xmm->en_fcs = penv_87->en_fcs;
        penv_xmm->en_opcode = penv_87->en_opcode;
        penv_xmm->en_foo = penv_87->en_foo;
        penv_xmm->en_fos = penv_87->en_fos;

        /* FPU registers */
        for (i = 0; i < 8; ++i)
                sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
}
#endif /* CPU_DISABLE_SSE */

int
fill_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
        if (lp->lwp_thread == NULL || lp->lwp_thread->td_pcb == NULL)
                return EINVAL;
#ifndef CPU_DISABLE_SSE
        if (cpu_fxsr) {
                fill_fpregs_xmm(&lp->lwp_thread->td_pcb->pcb_save.sv_xmm,
                                (struct save87 *)fpregs);
                return (0);
        }
#endif /* CPU_DISABLE_SSE */
        bcopy(&lp->lwp_thread->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
        return (0);
}

int
set_fpregs(struct lwp *lp, struct fpreg *fpregs)
{
#ifndef CPU_DISABLE_SSE
        if (cpu_fxsr) {
                set_fpregs_xmm((struct save87 *)fpregs,
                               &lp->lwp_thread->td_pcb->pcb_save.sv_xmm);
                return (0);
        }
#endif /* CPU_DISABLE_SSE */
        bcopy(fpregs, &lp->lwp_thread->td_pcb->pcb_save.sv_87, sizeof *fpregs);
        return (0);
}

int
fill_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
        return (ENOSYS);
}

int
set_dbregs(struct lwp *lp, struct dbreg *dbregs)
{
        return (ENOSYS);
}

#if 0
/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
        u_int32_t dr7, dr6;     /* debug registers dr6 and dr7 */
        u_int32_t bp;           /* breakpoint bits extracted from dr6 */
        int nbp;                /* number of breakpoints that triggered */
        caddr_t addr[4];        /* breakpoint addresses */
        int i;

        dr7 = rdr7();
        if ((dr7 & 0x000000ff) == 0) {
                /*
                 * all GE and LE bits in the dr7 register are zero,
                 * thus the trap couldn't have been caused by the
                 * hardware debug registers
                 */
                return 0;
        }

        nbp = 0;
        dr6 = rdr6();
        bp = dr6 & 0x0000000f;

        if (!bp) {
                /*
                 * None of the breakpoint bits are set meaning this
                 * trap was not caused by any of the debug registers
                 */
                return 0;
        }

        /*
         * at least one of the breakpoints was hit, check to see
         * which ones and if any of them are user space addresses
         */

        if (bp & 0x01) {
                addr[nbp++] = (caddr_t)rdr0();
        }
        if (bp & 0x02) {
                addr[nbp++] = (caddr_t)rdr1();
        }
        if (bp & 0x04) {
                addr[nbp++] = (caddr_t)rdr2();
        }
        if (bp & 0x08) {
                addr[nbp++] = (caddr_t)rdr3();
        }

        for (i = 0; i < nbp; i++) {
                if (addr[i] < (caddr_t)VM_MAX_USER_ADDRESS) {
                        /*
                         * addr[i] is in user space
                         */
                        return nbp;
                }
        }

        /*
         * None of the breakpoints are in user space.
         */
        return 0;
}

#endif

void
identcpu(void)
{
        int regs[4];

        do_cpuid(1, regs);
        cpu_feature = regs[3];
}


#ifndef DDB
void
Debugger(const char *msg)
{
        kprintf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */