/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_isa.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/wait.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

_Static_assert(OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");

void
set_top_of_stack_td(struct thread *td)
{
	td->td_md.md_stack_base = td->td_kstack +
	    td->td_kstack_pages * PAGE_SIZE -
	    roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
}

struct savefpu *
get_pcb_user_save_td(struct thread *td)
{
	vm_offset_t p;

	p = td->td_md.md_stack_base;
	KASSERT((p % XSAVE_AREA_ALIGN) == 0,
	    ("Unaligned pcb_user_save area ptr %#lx td %p", p, td));
	return ((struct savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{

	return (&td->td_md.md_pcb);
}

struct savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
	struct thread *td;

	td = __containerof(pcb, struct thread, td_md.md_pcb);
	return (get_pcb_user_save_td(td));
}

void *
alloc_fpusave(int flags)
{
	void *res;
	struct savefpu_ymm *sf;

	res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
	if (use_xsave) {
		sf = (struct savefpu_ymm *)res;
		bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
		sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
	}
	return (res);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp1, *mdp2;
	struct proc_ldt *pldt;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			mdp1 = &p1->p_md;
			mtx_lock(&dt_lock);
			if ((pldt = mdp1->md_ldt) != NULL &&
			    pldt->ldt_refcnt > 1 &&
			    user_ldt_alloc(p1, 1) == NULL)
				panic("could not copy LDT");
			mtx_unlock(&dt_lock);
		}
		return;
	}

	/* Ensure that td1's pcb is up to date. */
	fpuexit(td1);
	update_pcb_bases(td1->td_pcb);

	/* Point the stack and pcb to the actual location */
	set_top_of_stack_td(td2);
	td2->td_pcb = pcb2 = get_pcb_td(td2);

	/* Copy td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
	    cpu_max_ext_state_size);

	/* Point mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 */
	td2->td_frame = (struct trapframe *)td2->td_md.md_stack_base - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_rax = 0;		/* Child returns zero */
	td2->td_frame->tf_rflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_rdx = 1;

	/*
	 * If the parent process has the trap bit set (i.e. a debugger
	 * had single stepped the process to the system call), we need
	 * to clear the trap flag from the new frame.
	 */
	td2->td_frame->tf_rflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_r12 = (register_t)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_rbp = 0;
	pcb2->pcb_rsp = (register_t)td2->td_frame - sizeof(void *);
	pcb2->pcb_rbx = (register_t)td2;		/* fork_trampoline argument */
	pcb2->pcb_rip = (register_t)fork_trampoline;
	/*-
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_[fg]sbase:	cloned above
	 */

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
	pmap_thread_init_invl_gen(td2);

	/* As on i386, do not copy the io permission bitmap. */
	pcb2->pcb_tssp = NULL;

	/* New segment registers. */
	set_pcb_flags_raw(pcb2, PCB_FULL_IRET);

	/* Copy the LDT, if necessary. */
	mdp1 = &td1->td_proc->p_md;
	mdp2 = &p2->p_md;
	if (mdp1->md_ldt == NULL) {
		mdp2->md_ldt = NULL;
		return;
	}
	mtx_lock(&dt_lock);
	if (mdp1->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp1->md_ldt->ldt_refcnt++;
			mdp2->md_ldt = mdp1->md_ldt;
			bcopy(&mdp1->md_ldt_sd, &mdp2->md_ldt_sd,
			    sizeof(struct system_segment_descriptor));
		} else {
			mdp2->md_ldt = NULL;
			mdp2->md_ldt = user_ldt_alloc(p2, 0);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
			amd64_set_ldt_data(td2, 0, max_ldt_segment,
			    (struct user_segment_descriptor *)
			    mdp1->md_ldt->ldt_base);
		}
	} else
		mdp2->md_ldt = NULL;
	mtx_unlock(&dt_lock);

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_rsp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %rbx loaded with the new thread's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(td, frame); to complete
	 * the return to user-mode.
	 */
269 */ 270 } 271 272 /* 273 * Intercept the return address from a freshly forked process that has NOT 274 * been scheduled yet. 275 * 276 * This is needed to make kernel threads stay in kernel mode. 277 */ 278 void 279 cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) 280 { 281 /* 282 * Note that the trap frame follows the args, so the function 283 * is really called like this: func(arg, frame); 284 */ 285 td->td_pcb->pcb_r12 = (long) func; /* function */ 286 td->td_pcb->pcb_rbx = (long) arg; /* first arg */ 287 } 288 289 void 290 cpu_exit(struct thread *td) 291 { 292 293 /* 294 * If this process has a custom LDT, release it. 295 */ 296 if (td->td_proc->p_md.md_ldt != NULL) 297 user_ldt_free(td); 298 } 299 300 void 301 cpu_thread_exit(struct thread *td) 302 { 303 struct pcb *pcb; 304 305 critical_enter(); 306 if (td == PCPU_GET(fpcurthread)) 307 fpudrop(); 308 critical_exit(); 309 310 pcb = td->td_pcb; 311 312 /* Disable any hardware breakpoints. */ 313 if (pcb->pcb_flags & PCB_DBREGS) { 314 reset_dbregs(); 315 clear_pcb_flags(pcb, PCB_DBREGS); 316 } 317 } 318 319 void 320 cpu_thread_clean(struct thread *td) 321 { 322 struct pcb *pcb; 323 324 pcb = td->td_pcb; 325 326 /* 327 * Clean TSS/iomap 328 */ 329 if (pcb->pcb_tssp != NULL) { 330 pmap_pti_remove_kva((vm_offset_t)pcb->pcb_tssp, 331 (vm_offset_t)pcb->pcb_tssp + ctob(IOPAGES + 1)); 332 kmem_free((vm_offset_t)pcb->pcb_tssp, ctob(IOPAGES + 1)); 333 pcb->pcb_tssp = NULL; 334 } 335 } 336 337 void 338 cpu_thread_swapin(struct thread *td) 339 { 340 } 341 342 void 343 cpu_thread_swapout(struct thread *td) 344 { 345 } 346 347 void 348 cpu_thread_alloc(struct thread *td) 349 { 350 struct pcb *pcb; 351 struct xstate_hdr *xhdr; 352 353 set_top_of_stack_td(td); 354 td->td_pcb = pcb = get_pcb_td(td); 355 td->td_frame = (struct trapframe *)td->td_md.md_stack_base - 1; 356 pcb->pcb_save = get_pcb_user_save_pcb(pcb); 357 if (use_xsave) { 358 xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1); 359 bzero(xhdr, sizeof(*xhdr)); 360 xhdr->xstate_bv = xsave_mask; 361 } 362 } 363 364 void 365 cpu_thread_free(struct thread *td) 366 { 367 368 cpu_thread_clean(td); 369 } 370 371 bool 372 cpu_exec_vmspace_reuse(struct proc *p, vm_map_t map) 373 { 374 375 return (((curproc->p_md.md_flags & P_MD_KPTI) != 0) == 376 (vm_map_pmap(map)->pm_ucr3 != PMAP_NO_CR3)); 377 } 378 379 static void 380 cpu_procctl_kpti(struct proc *p, int com, int *val) 381 { 382 383 if (com == PROC_KPTI_CTL) { 384 if (pti && *val == PROC_KPTI_CTL_ENABLE_ON_EXEC) 385 p->p_md.md_flags |= P_MD_KPTI; 386 if (*val == PROC_KPTI_CTL_DISABLE_ON_EXEC) 387 p->p_md.md_flags &= ~P_MD_KPTI; 388 } else /* PROC_KPTI_STATUS */ { 389 *val = (p->p_md.md_flags & P_MD_KPTI) != 0 ? 
		    PROC_KPTI_CTL_ENABLE_ON_EXEC:
		    PROC_KPTI_CTL_DISABLE_ON_EXEC;
		if (vmspace_pmap(p->p_vmspace)->pm_ucr3 != PMAP_NO_CR3)
			*val |= PROC_KPTI_STATUS_ACTIVE;
	}
}

int
cpu_procctl(struct thread *td, int idtype, id_t id, int com, void *data)
{
	struct proc *p;
	int error, val;

	switch (com) {
	case PROC_KPTI_CTL:
	case PROC_KPTI_STATUS:
		if (idtype != P_PID) {
			error = EINVAL;
			break;
		}
		if (com == PROC_KPTI_CTL) {
			/* sad but true and not a joke */
			error = priv_check(td, PRIV_IO);
			if (error != 0)
				break;
			error = copyin(data, &val, sizeof(val));
			if (error != 0)
				break;
			if (val != PROC_KPTI_CTL_ENABLE_ON_EXEC &&
			    val != PROC_KPTI_CTL_DISABLE_ON_EXEC) {
				error = EINVAL;
				break;
			}
		}
		error = pget(id, PGET_CANSEE | PGET_NOTWEXIT | PGET_NOTID, &p);
		if (error == 0) {
			cpu_procctl_kpti(p, com, &val);
			PROC_UNLOCK(p);
			if (com == PROC_KPTI_STATUS)
				error = copyout(&val, data, sizeof(val));
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *frame;

	frame = td->td_frame;
	if (__predict_true(error == 0)) {
		frame->tf_rax = td->td_retval[0];
		frame->tf_rdx = td->td_retval[1];
		frame->tf_rflags &= ~PSL_C;
		return;
	}

	switch (error) {
	case ERESTART:
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes,
		 * lcall $X,y is 7 bytes, int 0x80 is 2 bytes.
		 * We saved this in tf_err.
		 * %r10 (which was holding the value of %rcx) is restored
		 * for the next iteration.
		 * %r10 restore is only required for freebsd/amd64 processes,
		 * but shall be innocent for any ia32 ABI.
		 *
		 * Require full context restore to get the arguments
		 * in the registers reloaded at return to usermode.
		 */
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
		set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
		break;

	case EJUSTRETURN:
		break;

	default:
		frame->tf_rax = SV_ABI_ERRNO(td->td_proc, error);
		frame->tf_rflags |= PSL_C;
		break;
	}
}

/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to the fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 */
	update_pcb_bases(td0->td_pcb);
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	clear_pcb_flags(pcb2, PCB_FPUINITDONE | PCB_USERFPUINITDONE |
	    PCB_KERNFPU);
	pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);
	bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
	    cpu_max_ext_state_size);
	set_pcb_flags_raw(pcb2, PCB_FULL_IRET);

	/*
	 * Create a new fresh stack for the new thread.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/* If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame.
	 * Otherwise, the new thread will receive a (likely unexpected)
	 * SIGTRAP when it executes the first instruction after returning
	 * to userland.
	 */
	td->td_frame->tf_rflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_r12 = (register_t)fork_return;	/* trampoline arg */
	pcb2->pcb_rbp = 0;
	pcb2->pcb_rsp = (register_t)td->td_frame - sizeof(void *);	/* trampoline arg */
	pcb2->pcb_rbx = (register_t)td;			/* trampoline arg */
	pcb2->pcb_rip = (register_t)fork_trampoline;
	/*
	 * If we didn't copy the pcb, we'd need to set the following registers:
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_[fg]sbase:	cloned above
	 */

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
	pmap_thread_init_invl_gen(td);
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		/*
		 * Set the trap frame to point at the beginning of the entry
		 * function.
		 */
		td->td_frame->tf_rbp = 0;
		td->td_frame->tf_rsp =
		    (((uintptr_t)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
		td->td_frame->tf_rip = (uintptr_t)entry;

		/* Return address sentinel value to stop stack unwinding. */
		suword32((void *)td->td_frame->tf_rsp, 0);

		/* Pass the argument to the entry point. */
		suword32((void *)(td->td_frame->tf_rsp + sizeof(int32_t)),
		    (uint32_t)(uintptr_t)arg);

		return;
	}
#endif

	/*
	 * Set the trap frame to point at the beginning of the uts
	 * function.
	 */
	td->td_frame->tf_rbp = 0;
	td->td_frame->tf_rsp =
	    ((register_t)stack->ss_sp + stack->ss_size) & ~0x0f;
	td->td_frame->tf_rsp -= 8;
	td->td_frame->tf_rip = (register_t)entry;
	td->td_frame->tf_ds = _udatasel;
	td->td_frame->tf_es = _udatasel;
	td->td_frame->tf_fs = _ufssel;
	td->td_frame->tf_gs = _ugssel;
	td->td_frame->tf_flags = TF_HASSEGS;

	/* Return address sentinel value to stop stack unwinding. */
	suword((void *)td->td_frame->tf_rsp, 0);

	/* Pass the argument to the entry point. */
	td->td_frame->tf_rdi = (register_t)arg;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct pcb *pcb;

	if ((u_int64_t)tls_base >= VM_MAXUSER_ADDRESS)
		return (EINVAL);

	pcb = td->td_pcb;
	set_pcb_flags(pcb, PCB_FULL_IRET);
#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		pcb->pcb_gsbase = (register_t)tls_base;
		return (0);
	}
#endif
	pcb->pcb_fsbase = (register_t)tls_base;
	return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
629 */ 630 void 631 swi_vm(void *dummy) 632 { 633 if (busdma_swi_pending != 0) 634 busdma_swi(); 635 } 636 637 /* 638 * Tell whether this address is in some physical memory region. 639 * Currently used by the kernel coredump code in order to avoid 640 * dumping the ``ISA memory hole'' which could cause indefinite hangs, 641 * or other unpredictable behaviour. 642 */ 643 644 int 645 is_physical_memory(vm_paddr_t addr) 646 { 647 648 #ifdef DEV_ISA 649 /* The ISA ``memory hole''. */ 650 if (addr >= 0xa0000 && addr < 0x100000) 651 return 0; 652 #endif 653 654 /* 655 * stuff other tests for known memory-mapped devices (PCI?) 656 * here 657 */ 658 659 return 1; 660 } 661