/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.132.2.9 2003/01/25 19:02:23 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/interrupt.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/segments.h>
#include <machine/globaldata.h>	/* npxthread */
#include <machine/vmm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <bus/isa/isa.h>

static void	cpu_reset_real(void);

/*
 * Finish a fork operation, with lwp lp2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct lwp *lp1, struct lwp *lp2, int flags)
{
	struct pcb *pcb2;

	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct pcb *pcb1 = lp1->lwp_thread->td_pcb;
			struct pcb_ldt *pcb_ldt = pcb1->pcb_ldt;

			if (pcb_ldt && pcb_ldt->ldt_refcnt > 1) {
				pcb_ldt = user_ldt_alloc(pcb1, pcb_ldt->ldt_len);
				user_ldt_free(pcb1);
				pcb1->pcb_ldt = pcb_ldt;
				set_user_ldt(pcb1);
			}
		}
		return;
	}

	/* Ensure that lp1's pcb is up to date. */
	if (mdcpu->gd_npxthread == lp1->lwp_thread)
		npxsave(lp1->lwp_thread->td_savefpu);

	/*
	 * Copy lp1's PCB.  This really only applies to the
	 * debug registers and FP state, but it's faster to just copy the
	 * whole thing.  Because we only save the PCB at switchout time,
	 * the register state may not be current.
	 */
	pcb2 = lp2->lwp_thread->td_pcb;
	*pcb2 = *lp1->lwp_thread->td_pcb;

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies the user mode register values.
	 *
	 * pcb_rsp must allocate an additional call-return pointer below
	 * the trap frame which will be restored by cpu_heavy_restore from
	 * PCB_RIP, and the thread's td_sp pointer must allocate an
	 * additional two quadwords below the pcb_rsp call-return pointer to
	 * hold the LWKT restore function pointer and rflags.
	 *
	 * The LWKT restore function pointer must be set to cpu_heavy_restore,
	 * which is our standard heavy-weight process switch-in function.
	 * YYY eventually we should shortcut fork_return and fork_trampoline
	 * to use the LWKT restore function directly so we can get rid of
	 * all the extra crap we are setting up.
	 */
	lp2->lwp_md.md_regs = (struct trapframe *)pcb2 - 1;
	bcopy(lp1->lwp_md.md_regs, lp2->lwp_md.md_regs,
	      sizeof(*lp2->lwp_md.md_regs));

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(lp2->lwp_proc->p_vmspace)->pm_pml4);
	pcb2->pcb_rbx = (unsigned long)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_rbp = 0;
	pcb2->pcb_rsp = (unsigned long)lp2->lwp_md.md_regs - sizeof(void *);
	pcb2->pcb_r12 = (unsigned long)lp2;		/* fork_trampoline argument */
	pcb2->pcb_r13 = 0;
	pcb2->pcb_r14 = 0;
	pcb2->pcb_r15 = 0;
	pcb2->pcb_rip = (unsigned long)fork_trampoline;
	lp2->lwp_thread->td_sp = (char *)(pcb2->pcb_rsp - sizeof(void *));
	*(u_int64_t *)lp2->lwp_thread->td_sp = PSL_USER;
	lp2->lwp_thread->td_sp -= sizeof(void *);
	*(void **)lp2->lwp_thread->td_sp = (void *)cpu_heavy_restore;
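
	/*
	 * Resulting layout of lp2's kernel stack at this point, from
	 * higher to lower addresses (a sketch derived from the
	 * assignments above):
	 *
	 *	pcb2 (lp2's td_pcb)
	 *	trapframe (lp2->lwp_md.md_regs, copied from lp1)
	 *	call-return pointer slot (pcb2->pcb_rsp, loaded from
	 *	    PCB_RIP by cpu_heavy_restore)
	 *	rflags word (PSL_USER)
	 *	cpu_heavy_restore pointer (lp2->lwp_thread->td_sp)
	 */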

	/*
	 * pcb2->pcb_ldt:	duplicated below, if necessary.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above (always 0 here?).
	 * pcb2->pcb_onfault:	cloned above (always NULL here).
	 * pcb2->pcb_onfault_sp: cloned above (don't care).
	 */

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = NULL;

	/* Copy the LDT, if necessary. */
	if (pcb2->pcb_ldt != NULL) {
		if (flags & RFMEM) {
			pcb2->pcb_ldt->ldt_refcnt++;
		} else {
			pcb2->pcb_ldt = user_ldt_alloc(pcb2,
						       pcb2->pcb_ldt->ldt_len);
		}
	}
	bcopy(&lp1->lwp_thread->td_tls, &lp2->lwp_thread->td_tls,
	      sizeof(lp2->lwp_thread->td_tls));

	/*
	 * Now, cpu_switch() can schedule the new lwp.
	 * pcb_rsp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %rbx loaded with the new lwp's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(lp, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Prepare the new lwp to return to the address specified in params.
 */
int
cpu_prepare_lwp(struct lwp *lp, struct lwp_params *params)
{
	struct trapframe *regs = lp->lwp_md.md_regs;
	void *bad_return = NULL;
	int error;

	regs->tf_rip = (long)params->lwp_func;
	regs->tf_rsp = (long)params->lwp_stack;
	/* Set up the argument for the function call */
	regs->tf_rdi = (long)params->lwp_arg;

	/*
	 * Set up a fake return address.  As the lwp function may never
	 * return, we simply copy out a NULL pointer and force the lwp to
	 * receive a SIGSEGV if it returns anyway.
	 */
	regs->tf_rsp -= sizeof(void *);
	error = copyout(&bad_return, (void *)regs->tf_rsp, sizeof(bad_return));
	if (error)
		return (error);

	if (lp->lwp_proc->p_vmm) {
		lp->lwp_thread->td_pcb->pcb_cr3 = KPML4phys;
		cpu_set_fork_handler(lp,
		    (void (*)(void *, struct trapframe *))vmm_lwp_return, lp);
	} else {
		cpu_set_fork_handler(lp,
		    (void (*)(void *, struct trapframe *))generic_lwp_return, lp);
	}
	return (0);
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct lwp *lp, void (*func)(void *, struct trapframe *),
		     void *arg)
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	lp->lwp_thread->td_pcb->pcb_rbx = (long)func;	/* function */
	lp->lwp_thread->td_pcb->pcb_r12 = (long)arg;	/* first arg */
}

void
cpu_set_thread_handler(thread_t td, void (*rfunc)(void), void *func, void *arg)
{
	td->td_pcb->pcb_rbx = (long)func;
	td->td_pcb->pcb_r12 = (long)arg;
	td->td_switch = cpu_lwkt_switch;
	td->td_sp -= sizeof(void *);
	*(void **)td->td_sp = rfunc;	/* exit function on return */
	td->td_sp -= sizeof(void *);
	*(void **)td->td_sp = cpu_kthread_restore;
}
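
/*
 * A sketch of what the above builds (inferred from the assignments):
 * td_sp points at cpu_kthread_restore, the LWKT restore function used when
 * the thread is first switched in, and the word above it holds rfunc, which
 * is entered if func(arg) ever returns.
 */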

void
cpu_lwp_exit(void)
{
	struct thread *td = curthread;
	struct pcb *pcb;

	pcb = td->td_pcb;

	/* Some i386 functionality was dropped */
	KKASSERT(pcb->pcb_ext == NULL);

	/*
	 * Disable all hardware breakpoints.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		pcb->pcb_flags &= ~PCB_DBREGS;
	}
	td->td_gd->gd_cnt.v_swtch++;

	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ)
		tsleep_remove(td);
	lwkt_deschedule_self(td);
	lwkt_remove_tdallq(td);
	cpu_thread_exit();
}

/*
 * Terminate the current thread.  The caller must have already acquired
 * the thread's rwlock and placed it on a reap list or otherwise notified
 * a reaper of its existence.  We set a special assembly switch function
 * which releases td_rwlock after it has cleaned up the MMU state and
 * switched out the stack.
 *
 * Must be called from a critical section and with the thread descheduled.
 */
void
cpu_thread_exit(void)
{
	npxexit();
	curthread->td_switch = cpu_exit_switch;
	curthread->td_flags |= TDF_EXITING;
	lwkt_switch();
	panic("cpu_thread_exit: lwkt_switch() unexpectedly returned");
}

void
cpu_reset(void)
{
	cpu_reset_real();
}

static void
cpu_reset_real(void)
{
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off the GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	kprintf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for kprintf to complete */
#endif
#if 0 /* JG */
	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t)PTD, PAGE_SIZE);
#endif

	/* "good night, sweet prince .... <THUNK!>" */
	cpu_invltlb();
	/* NOTREACHED */
	while (1)
		;
}

/*
 * Convert a kernel virtual address to a physical address.
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

static void
swi_vm(void *arg, void *frame)
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}

static void
swi_vm_setup(void *arg)
{
	register_swi(SWI_VM, swi_vm, NULL, "swi_vm", NULL, 0);
}

SYSINIT(vm_setup, SI_BOOT2_MACHDEP, SI_ORDER_ANY, swi_vm_setup, NULL);

/*
 * Platform-specific vmspace initialization (nothing for x86_64).
 */
void
cpu_vmspace_alloc(struct vmspace *vm __unused)
{
}

void
cpu_vmspace_free(struct vmspace *vm __unused)
{
}

int
kvm_access_check(vm_offset_t saddr, vm_offset_t eaddr, int prot)
{
	vm_offset_t addr;

	if (saddr < KvaStart)
		return EFAULT;
	if (eaddr >= KvaEnd)
		return EFAULT;
	for (addr = saddr; addr < eaddr; addr += PAGE_SIZE) {
		if (pmap_extract(&kernel_pmap, addr) == 0)
			return EFAULT;
	}
	if (!kernacc((caddr_t)saddr, eaddr - saddr, prot))
		return EFAULT;
	return 0;
}

#if 0

void _test_frame_enter(struct trapframe *frame);
void _test_frame_exit(struct trapframe *frame);

void
_test_frame_enter(struct trapframe *frame)
{
	thread_t td = curthread;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		KKASSERT(td->td_lwp);
		KASSERT(td->td_lwp->lwp_md.md_regs == frame,
			("_test_frame_enter: Frame mismatch %p %p",
			 td->td_lwp->lwp_md.md_regs, frame));
		td->td_lwp->lwp_saveusp = (void *)frame->tf_rsp;
		td->td_lwp->lwp_saveupc = (void *)frame->tf_rip;
	}
	if ((char *)frame < td->td_kstack ||
	    (char *)frame > td->td_kstack + td->td_kstack_size) {
		panic("_test_frame_enter: frame not on kstack %p kstack=%p",
		      frame, td->td_kstack);
	}
}

void
_test_frame_exit(struct trapframe *frame)
{
	thread_t td = curthread;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		KKASSERT(td->td_lwp);
		KASSERT(td->td_lwp->lwp_md.md_regs == frame,
			("_test_frame_exit: Frame mismatch %p %p",
			 td->td_lwp->lwp_md.md_regs, frame));
		if (td->td_lwp->lwp_saveusp != (void *)frame->tf_rsp) {
			kprintf("_test_frame_exit: %s:%d usp mismatch %p/%p\n",
				td->td_comm, td->td_proc->p_pid,
				td->td_lwp->lwp_saveusp,
				(void *)frame->tf_rsp);
		}
		if (td->td_lwp->lwp_saveupc != (void *)frame->tf_rip) {
			kprintf("_test_frame_exit: %s:%d upc mismatch %p/%p\n",
				td->td_comm, td->td_proc->p_pid,
				td->td_lwp->lwp_saveupc,
				(void *)frame->tf_rip);
		}

		/*
		 * Adulterate the fields to catch entries that
		 * don't run through _test_frame_enter.
		 */
		td->td_lwp->lwp_saveusp =
			(void *)~(intptr_t)td->td_lwp->lwp_saveusp;
		td->td_lwp->lwp_saveupc =
			(void *)~(intptr_t)td->td_lwp->lwp_saveupc;
	}
	if ((char *)frame < td->td_kstack ||
	    (char *)frame > td->td_kstack + td->td_kstack_size) {
		panic("_test_frame_exit: frame not on kstack %p kstack=%p",
		      frame, td->td_kstack);
	}
}

#endif