/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_machdep.c  7.3 (Berkeley) 5/13/91
 *      Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_isa.h"
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/wait.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

_Static_assert(OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf),
    "OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf.");

void
set_top_of_stack_td(struct thread *td)
{
        td->td_md.md_stack_base = td->td_kstack +
            td->td_kstack_pages * PAGE_SIZE -
            roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN);
}

struct savefpu *
get_pcb_user_save_td(struct thread *td)
{
        vm_offset_t p;

        p = td->td_md.md_stack_base;
        KASSERT((p % XSAVE_AREA_ALIGN) == 0,
            ("Unaligned pcb_user_save area ptr %#lx td %p", p, td));
        return ((struct savefpu *)p);
}

struct pcb *
get_pcb_td(struct thread *td)
{

        return (&td->td_md.md_pcb);
}

struct savefpu *
get_pcb_user_save_pcb(struct pcb *pcb)
{
        struct thread *td;

        td = __containerof(pcb, struct thread, td_md.md_pcb);
        return (get_pcb_user_save_td(td));
}

void *
alloc_fpusave(int flags)
{
        void *res;
        struct savefpu_ymm *sf;

        res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags);
        if (use_xsave) {
                sf = (struct savefpu_ymm *)res;
                bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd));
                sf->sv_xstate.sx_hd.xstate_bv = xsave_mask;
        }
        return (res);
}
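
/*
 * A rough sketch of the top of a thread's kernel stack, as implied by
 * set_top_of_stack_td() and cpu_thread_alloc() (illustrative only):
 *
 *      +----------------------------+ <- td_kstack + td_kstack_pages * PAGE_SIZE
 *      | user FPU save area         |    cpu_max_ext_state_size bytes,
 *      | (struct savefpu)           |    rounded up to XSAVE_AREA_ALIGN
 *      +----------------------------+ <- td_md.md_stack_base
 *      | struct trapframe           |
 *      +----------------------------+ <- td_frame
 *      | kernel stack, grows down   |
 *      ~                            ~
 */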

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
        struct proc *p1;
        struct pcb *pcb2;
        struct mdproc *mdp1, *mdp2;
        struct proc_ldt *pldt;

        p1 = td1->td_proc;
        if ((flags & RFPROC) == 0) {
                if ((flags & RFMEM) == 0) {
                        /* unshare user LDT */
                        mdp1 = &p1->p_md;
                        mtx_lock(&dt_lock);
                        if ((pldt = mdp1->md_ldt) != NULL &&
                            pldt->ldt_refcnt > 1 &&
                            user_ldt_alloc(p1, 1) == NULL)
                                panic("could not copy LDT");
                        mtx_unlock(&dt_lock);
                }
                return;
        }

        /* Ensure that td1's pcb is up to date for user processes. */
        if ((td2->td_pflags & TDP_KTHREAD) == 0) {
                MPASS(td1 == curthread);
                fpuexit(td1);
                update_pcb_bases(td1->td_pcb);
        }

        /* Point the stack and pcb to the actual location */
        set_top_of_stack_td(td2);
        td2->td_pcb = pcb2 = get_pcb_td(td2);

        /* Copy td1's pcb */
        bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

        /* Properly initialize pcb_save */
        pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);

        /* Kernel processes start with clean FPU and segment bases. */
        if ((td2->td_pflags & TDP_KTHREAD) != 0) {
                pcb2->pcb_fsbase = 0;
                pcb2->pcb_gsbase = 0;
                clear_pcb_flags(pcb2, PCB_FPUINITDONE | PCB_USERFPUINITDONE |
                    PCB_KERNFPU | PCB_KERNFPU_THR);
        } else {
                MPASS((pcb2->pcb_flags & (PCB_KERNFPU | PCB_KERNFPU_THR)) == 0);
                bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2),
                    cpu_max_ext_state_size);
        }

        /* Point mdproc and then copy over td1's contents */
        mdp2 = &p2->p_md;
        bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

        /*
         * Create a new fresh stack for the new process.
         * Copy the trap frame for the return to user mode as if from a
         * syscall.  This copies most of the user mode register values.
         */
        td2->td_frame = (struct trapframe *)td2->td_md.md_stack_base - 1;
        bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

        td2->td_frame->tf_rax = 0;              /* Child returns zero */
        td2->td_frame->tf_rflags &= ~PSL_C;     /* success */
        td2->td_frame->tf_rdx = 1;

        /*
         * If the parent process has the trap bit set (i.e. a debugger
         * had single stepped the process to the system call), we need
         * to clear the trap flag from the new frame.
         */
        td2->td_frame->tf_rflags &= ~PSL_T;

        /*
         * Set registers for trampoline to user mode.  Leave space for the
         * return address on stack.  These are the kernel mode register values.
         */
        pcb2->pcb_r12 = (register_t)fork_return; /* fork_trampoline argument */
        pcb2->pcb_rbp = 0;
        pcb2->pcb_rsp = (register_t)td2->td_frame - sizeof(void *);
        pcb2->pcb_rbx = (register_t)td2;        /* fork_trampoline argument */
        pcb2->pcb_rip = (register_t)fork_trampoline;
        /*-
         * pcb2->pcb_dr*:       cloned above.
         * pcb2->pcb_savefpu:   cloned above.
         * pcb2->pcb_flags:     cloned above.
         * pcb2->pcb_onfault:   cloned above (always NULL here?).
         * pcb2->pcb_[fg]sbase: cloned above
         */

        /* Setup to release spin count in fork_exit(). */
        td2->td_md.md_spinlock_count = 1;
        td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
        pmap_thread_init_invl_gen(td2);

        /* As on i386, do not copy the io permission bitmap. */
        pcb2->pcb_tssp = NULL;

        /* New segment registers. */
        set_pcb_flags_raw(pcb2, PCB_FULL_IRET);
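
        /*
         * PCB_FULL_IRET requests that the next return to user mode use
         * the full iretq path, reloading segment registers and the
         * fs/gs bases, instead of the fast sysretq path.
         */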

        /* Copy the LDT, if necessary. */
        mdp1 = &td1->td_proc->p_md;
        mdp2 = &p2->p_md;
        if (mdp1->md_ldt == NULL) {
                mdp2->md_ldt = NULL;
                return;
        }
        mtx_lock(&dt_lock);
        if (mdp1->md_ldt != NULL) {
                if (flags & RFMEM) {
                        mdp1->md_ldt->ldt_refcnt++;
                        mdp2->md_ldt = mdp1->md_ldt;
                        bcopy(&mdp1->md_ldt_sd, &mdp2->md_ldt_sd,
                            sizeof(struct system_segment_descriptor));
                } else {
                        mdp2->md_ldt = NULL;
                        mdp2->md_ldt = user_ldt_alloc(p2, 0);
                        if (mdp2->md_ldt == NULL)
                                panic("could not copy LDT");
                        amd64_set_ldt_data(td2, 0, max_ldt_segment,
                            (struct user_segment_descriptor *)
                            mdp1->md_ldt->ldt_base);
                }
        } else
                mdp2->md_ldt = NULL;
        mtx_unlock(&dt_lock);

        /*
         * Now, cpu_switch() can schedule the new process.
         * pcb_rsp is loaded pointing to the cpu_switch() stack frame
         * containing the return address when exiting cpu_switch.
         * This will normally be to fork_trampoline(), which will have
         * %rbx loaded with the new thread's pointer.  fork_trampoline()
         * will set up a stack to call fork_return(td, frame); to complete
         * the return to user-mode.
         */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
        /*
         * Note that the trap frame follows the args, so the function
         * is really called like this:  func(arg, frame);
         */
        td->td_pcb->pcb_r12 = (long) func;      /* function */
        td->td_pcb->pcb_rbx = (long) arg;       /* first arg */
}
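
/*
 * Callers such as kthread_add() use the hook above so that the new
 * kernel thread starts in its supplied function rather than in
 * fork_return(): func lands in pcb_r12 and arg in pcb_rbx, and
 * fork_trampoline() then performs the call func(arg, frame).
 */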

void
cpu_exit(struct thread *td)
{

        /*
         * If this process has a custom LDT, release it.
         */
        if (td->td_proc->p_md.md_ldt != NULL)
                user_ldt_free(td);
}

void
cpu_thread_exit(struct thread *td)
{
        struct pcb *pcb;

        critical_enter();
        if (td == PCPU_GET(fpcurthread))
                fpudrop();
        critical_exit();

        pcb = td->td_pcb;

        /* Disable any hardware breakpoints. */
        if (pcb->pcb_flags & PCB_DBREGS) {
                reset_dbregs();
                clear_pcb_flags(pcb, PCB_DBREGS);
        }
}

void
cpu_thread_clean(struct thread *td)
{
        struct pcb *pcb;

        pcb = td->td_pcb;

        /*
         * Clean TSS/iomap
         */
        if (pcb->pcb_tssp != NULL) {
                pmap_pti_remove_kva((vm_offset_t)pcb->pcb_tssp,
                    (vm_offset_t)pcb->pcb_tssp + ctob(IOPAGES + 1));
                kmem_free((vm_offset_t)pcb->pcb_tssp, ctob(IOPAGES + 1));
                pcb->pcb_tssp = NULL;
        }
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
        struct pcb *pcb;
        struct xstate_hdr *xhdr;

        set_top_of_stack_td(td);
        td->td_pcb = pcb = get_pcb_td(td);
        td->td_frame = (struct trapframe *)td->td_md.md_stack_base - 1;
        pcb->pcb_save = get_pcb_user_save_pcb(pcb);
        if (use_xsave) {
                xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1);
                bzero(xhdr, sizeof(*xhdr));
                xhdr->xstate_bv = xsave_mask;
        }
}

void
cpu_thread_free(struct thread *td)
{

        cpu_thread_clean(td);
}

bool
cpu_exec_vmspace_reuse(struct proc *p, vm_map_t map)
{

        return (((curproc->p_md.md_flags & P_MD_KPTI) != 0) ==
            (vm_map_pmap(map)->pm_ucr3 != PMAP_NO_CR3));
}

static void
cpu_procctl_kpti_ctl(struct proc *p, int val)
{

        if (pti && val == PROC_KPTI_CTL_ENABLE_ON_EXEC)
                p->p_md.md_flags |= P_MD_KPTI;
        if (val == PROC_KPTI_CTL_DISABLE_ON_EXEC)
                p->p_md.md_flags &= ~P_MD_KPTI;
}

static void
cpu_procctl_kpti_status(struct proc *p, int *val)
{
        *val = (p->p_md.md_flags & P_MD_KPTI) != 0 ?
            PROC_KPTI_CTL_ENABLE_ON_EXEC :
            PROC_KPTI_CTL_DISABLE_ON_EXEC;
        if (vmspace_pmap(p->p_vmspace)->pm_ucr3 != PMAP_NO_CR3)
                *val |= PROC_KPTI_STATUS_ACTIVE;
}

static int
cpu_procctl_la_ctl(struct proc *p, int val)
{
        int error;

        error = 0;
        switch (val) {
        case PROC_LA_CTL_LA48_ON_EXEC:
                p->p_md.md_flags |= P_MD_LA48;
                p->p_md.md_flags &= ~P_MD_LA57;
                break;
        case PROC_LA_CTL_LA57_ON_EXEC:
                if (la57) {
                        p->p_md.md_flags &= ~P_MD_LA48;
                        p->p_md.md_flags |= P_MD_LA57;
                } else {
                        error = ENOTSUP;
                }
                break;
        case PROC_LA_CTL_DEFAULT_ON_EXEC:
                p->p_md.md_flags &= ~(P_MD_LA48 | P_MD_LA57);
                break;
        }
        return (error);
}

static void
cpu_procctl_la_status(struct proc *p, int *val)
{
        int res;

        if ((p->p_md.md_flags & P_MD_LA48) != 0)
                res = PROC_LA_CTL_LA48_ON_EXEC;
        else if ((p->p_md.md_flags & P_MD_LA57) != 0)
                res = PROC_LA_CTL_LA57_ON_EXEC;
        else
                res = PROC_LA_CTL_DEFAULT_ON_EXEC;
        if (p->p_sysent->sv_maxuser == VM_MAXUSER_ADDRESS_LA48)
                res |= PROC_LA_STATUS_LA48;
        else
                res |= PROC_LA_STATUS_LA57;
        *val = res;
}
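
/*
 * The handlers above are reached from userland through procctl(2).  A
 * minimal sketch, with error handling omitted and the calling process
 * targeting itself:
 *
 *      int val = PROC_KPTI_CTL_DISABLE_ON_EXEC;
 *      (void)procctl(P_PID, getpid(), PROC_KPTI_CTL, &val);
 *
 *      (void)procctl(P_PID, getpid(), PROC_KPTI_STATUS, &val);
 *      if ((val & PROC_KPTI_STATUS_ACTIVE) != 0)
 *              printf("KPTI active\n");
 */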

int
cpu_procctl(struct thread *td, int idtype, id_t id, int com, void *data)
{
        struct proc *p;
        int error, val;

        switch (com) {
        case PROC_KPTI_CTL:
        case PROC_KPTI_STATUS:
        case PROC_LA_CTL:
        case PROC_LA_STATUS:
                if (idtype != P_PID) {
                        error = EINVAL;
                        break;
                }
                if (com == PROC_KPTI_CTL) {
                        /* sad but true and not a joke */
                        error = priv_check(td, PRIV_IO);
                        if (error != 0)
                                break;
                }
                if (com == PROC_KPTI_CTL || com == PROC_LA_CTL) {
                        error = copyin(data, &val, sizeof(val));
                        if (error != 0)
                                break;
                }
                if (com == PROC_KPTI_CTL &&
                    val != PROC_KPTI_CTL_ENABLE_ON_EXEC &&
                    val != PROC_KPTI_CTL_DISABLE_ON_EXEC) {
                        error = EINVAL;
                        break;
                }
                if (com == PROC_LA_CTL &&
                    val != PROC_LA_CTL_LA48_ON_EXEC &&
                    val != PROC_LA_CTL_LA57_ON_EXEC &&
                    val != PROC_LA_CTL_DEFAULT_ON_EXEC) {
                        error = EINVAL;
                        break;
                }
                error = pget(id, PGET_CANSEE | PGET_NOTWEXIT | PGET_NOTID, &p);
                if (error != 0)
                        break;
                switch (com) {
                case PROC_KPTI_CTL:
                        cpu_procctl_kpti_ctl(p, val);
                        break;
                case PROC_KPTI_STATUS:
                        cpu_procctl_kpti_status(p, &val);
                        break;
                case PROC_LA_CTL:
                        error = cpu_procctl_la_ctl(p, val);
                        break;
                case PROC_LA_STATUS:
                        cpu_procctl_la_status(p, &val);
                        break;
                }
                PROC_UNLOCK(p);
                if (com == PROC_KPTI_STATUS || com == PROC_LA_STATUS)
                        error = copyout(&val, data, sizeof(val));
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{
        struct trapframe *frame;

        frame = td->td_frame;
        if (__predict_true(error == 0)) {
                frame->tf_rax = td->td_retval[0];
                frame->tf_rdx = td->td_retval[1];
                frame->tf_rflags &= ~PSL_C;
                return;
        }

        switch (error) {
        case ERESTART:
                /*
                 * Reconstruct pc, we know that 'syscall' is 2 bytes,
                 * lcall $X,y is 7 bytes, int 0x80 is 2 bytes.
                 * We saved this in tf_err.
                 * %r10 (which was holding the value of %rcx) is restored
                 * for the next iteration.
                 * %r10 restore is only required for freebsd/amd64 processes,
                 * but shall be innocent for any ia32 ABI.
                 *
                 * Require full context restore to get the arguments
                 * in the registers reloaded at return to usermode.
                 */
                frame->tf_rip -= frame->tf_err;
                frame->tf_r10 = frame->tf_rcx;
                set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
                break;

        case EJUSTRETURN:
                break;

        default:
                frame->tf_rax = error;
                frame->tf_rflags |= PSL_C;
                break;
        }
}
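
/*
 * For example, when a read(2) sleeping in the kernel is interrupted by
 * a signal whose handler was installed with SA_RESTART, the syscall
 * returns ERESTART internally.  The code above then rewinds %rip by
 * tf_err bytes (2 for the "syscall" instruction), so the thread
 * re-executes the system call after the handler completes.
 */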

/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to the fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
        struct pcb *pcb2;

        pcb2 = td->td_pcb;

        /* Ensure that td0's pcb is up to date for user threads. */
        if ((td->td_pflags & TDP_KTHREAD) == 0) {
                MPASS(td0 == curthread);
                fpuexit(td0);
                update_pcb_bases(td0->td_pcb);
        }

        /*
         * Copy the upcall pcb.  This loads kernel regs.
         * Those not loaded individually below get their default
         * values here.
         */
        bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
        pcb2->pcb_save = get_pcb_user_save_pcb(pcb2);

        /* Kernel threads start with clean FPU and segment bases. */
        if ((td->td_pflags & TDP_KTHREAD) != 0) {
                pcb2->pcb_fsbase = 0;
                pcb2->pcb_gsbase = 0;
                clear_pcb_flags(pcb2, PCB_FPUINITDONE | PCB_USERFPUINITDONE |
                    PCB_KERNFPU | PCB_KERNFPU_THR);
        } else {
                MPASS((pcb2->pcb_flags & (PCB_KERNFPU | PCB_KERNFPU_THR)) == 0);
                bcopy(get_pcb_user_save_td(td0), pcb2->pcb_save,
                    cpu_max_ext_state_size);
        }
        set_pcb_flags_raw(pcb2, PCB_FULL_IRET);

        /*
         * Create a new fresh stack for the new thread.
         */
        bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

        /*
         * If the current thread has the trap bit set (i.e. a debugger had
         * single stepped the process to the system call), we need to clear
         * the trap flag from the new frame.  Otherwise, the new thread will
         * receive a (likely unexpected) SIGTRAP when it executes the first
         * instruction after returning to userland.
         */
        td->td_frame->tf_rflags &= ~PSL_T;

        /*
         * Set registers for trampoline to user mode.  Leave space for the
         * return address on stack.  These are the kernel mode register values.
         */
        pcb2->pcb_r12 = (register_t)fork_return;        /* trampoline arg */
        pcb2->pcb_rbp = 0;
        pcb2->pcb_rsp = (register_t)td->td_frame - sizeof(void *);
        pcb2->pcb_rbx = (register_t)td;                 /* trampoline arg */
        pcb2->pcb_rip = (register_t)fork_trampoline;
        /*
         * If we didn't copy the pcb, we'd need to do the following registers:
         * pcb2->pcb_dr*:       cloned above.
         * pcb2->pcb_savefpu:   cloned above.
         * pcb2->pcb_onfault:   cloned above (always NULL here?).
         * pcb2->pcb_[fg]sbase: cloned above
         */

        /* Setup to release spin count in fork_exit(). */
        td->td_md.md_spinlock_count = 1;
        td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
        pmap_thread_init_invl_gen(td);
}
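
/*
 * The stack arithmetic in cpu_set_upcall() below mirrors the ABI call
 * sequence: the amd64 SysV ABI has %rsp + 8 aligned to 16 bytes at
 * function entry, so aligning to 16 and then subtracting 8 imitates
 * the return-address push of a call; the ia32 path does the same with
 * 4-byte slots.
 */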

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

        /*
         * Do any extra cleaning that needs to be done.
         * The thread may have optional components
         * that are not present in a fresh thread.
         * This may be a recycled thread so make it look
         * as though it's newly allocated.
         */
        cpu_thread_clean(td);

#ifdef COMPAT_FREEBSD32
        if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
                /*
                 * Set the trap frame to point at the beginning of the entry
                 * function.
                 */
                td->td_frame->tf_rbp = 0;
                td->td_frame->tf_rsp =
                    (((uintptr_t)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
                td->td_frame->tf_rip = (uintptr_t)entry;

                /* Return address sentinel value to stop stack unwinding. */
                suword32((void *)td->td_frame->tf_rsp, 0);

                /* Pass the argument to the entry point. */
                suword32((void *)(td->td_frame->tf_rsp + sizeof(int32_t)),
                    (uint32_t)(uintptr_t)arg);

                return;
        }
#endif

        /*
         * Set the trap frame to point at the beginning of the uts
         * function.
         */
        td->td_frame->tf_rbp = 0;
        td->td_frame->tf_rsp =
            ((register_t)stack->ss_sp + stack->ss_size) & ~0x0f;
        td->td_frame->tf_rsp -= 8;
        td->td_frame->tf_rip = (register_t)entry;
        td->td_frame->tf_ds = _udatasel;
        td->td_frame->tf_es = _udatasel;
        td->td_frame->tf_fs = _ufssel;
        td->td_frame->tf_gs = _ugssel;
        td->td_frame->tf_flags = TF_HASSEGS;

        /* Return address sentinel value to stop stack unwinding. */
        suword((void *)td->td_frame->tf_rsp, 0);

        /* Pass the argument to the entry point. */
        td->td_frame->tf_rdi = (register_t)arg;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
        struct pcb *pcb;

        if ((u_int64_t)tls_base >= VM_MAXUSER_ADDRESS)
                return (EINVAL);

        pcb = td->td_pcb;
        set_pcb_flags(pcb, PCB_FULL_IRET);
#ifdef COMPAT_FREEBSD32
        if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
                pcb->pcb_gsbase = (register_t)tls_base;
                return (0);
        }
#endif
        pcb->pcb_fsbase = (register_t)tls_base;
        return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
        if (busdma_swi_pending != 0)
                busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_paddr_t addr)
{

#ifdef DEV_ISA
        /* The ISA ``memory hole''. */
        if (addr >= 0xa0000 && addr < 0x100000)
                return (0);
#endif

        /*
         * Stuff other tests for known memory-mapped devices (PCI?)
         * here.
         */

        return (1);
}