1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2018 The FreeBSD Foundation 5 * Copyright (c) 1992 Terrence R. Lambert. 6 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * William Jolitz. 11 * 12 * Portions of this software were developed by A. Joseph Koshy under 13 * sponsorship from the FreeBSD Foundation and Google, Inc. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. All advertising materials mentioning features or use of this software 24 * must display the following acknowledgement: 25 * This product includes software developed by the University of 26 * California, Berkeley and its contributors. 27 * 4. Neither the name of the University nor the names of its contributors 28 * may be used to endorse or promote products derived from this software 29 * without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 34 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 41 * SUCH DAMAGE. 42 * 43 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 44 */ 45 46 #include <sys/cdefs.h> 47 #include "opt_cpu.h" 48 #include "opt_ddb.h" 49 #include "opt_kstack_pages.h" 50 51 #include <sys/param.h> 52 #include <sys/proc.h> 53 #include <sys/systm.h> 54 #include <sys/exec.h> 55 #include <sys/imgact.h> 56 #include <sys/kdb.h> 57 #include <sys/kernel.h> 58 #include <sys/ktr.h> 59 #include <sys/linker.h> 60 #include <sys/lock.h> 61 #include <sys/malloc.h> 62 #include <sys/mutex.h> 63 #include <sys/pcpu.h> 64 #include <sys/ptrace.h> 65 #include <sys/reg.h> 66 #include <sys/rwlock.h> 67 #include <sys/signalvar.h> 68 #include <sys/syscallsubr.h> 69 #include <sys/sysctl.h> 70 #include <sys/sysent.h> 71 #include <sys/sysproto.h> 72 #include <sys/ucontext.h> 73 #include <sys/vmmeter.h> 74 75 #include <vm/vm.h> 76 #include <vm/vm_param.h> 77 #include <vm/vm_extern.h> 78 #include <vm/vm_kern.h> 79 #include <vm/vm_page.h> 80 #include <vm/vm_map.h> 81 #include <vm/vm_object.h> 82 83 #ifdef DDB 84 #ifndef KDB 85 #error KDB must be enabled in order for DDB to work! 
86 #endif 87 #include <ddb/ddb.h> 88 #include <ddb/db_sym.h> 89 #endif 90 91 #include <machine/cpu.h> 92 #include <machine/cputypes.h> 93 #include <machine/md_var.h> 94 #include <machine/pcb.h> 95 #include <machine/pcb_ext.h> 96 #include <machine/proc.h> 97 #include <machine/sigframe.h> 98 #include <machine/specialreg.h> 99 #include <machine/sysarch.h> 100 #include <machine/trap.h> 101 102 static void fpstate_drop(struct thread *td); 103 static void get_fpcontext(struct thread *td, mcontext_t *mcp, 104 char *xfpusave, size_t xfpusave_len); 105 static int set_fpcontext(struct thread *td, mcontext_t *mcp, 106 char *xfpustate, size_t xfpustate_len); 107 #ifdef COMPAT_43 108 static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask); 109 #endif 110 #ifdef COMPAT_FREEBSD4 111 static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask); 112 #endif 113 114 extern struct sysentvec elf32_freebsd_sysvec; 115 116 _Static_assert(sizeof(mcontext_t) == 640, "mcontext_t size incorrect"); 117 _Static_assert(sizeof(ucontext_t) == 704, "ucontext_t size incorrect"); 118 _Static_assert(sizeof(siginfo_t) == 64, "siginfo_t size incorrect"); 119 120 /* 121 * Send an interrupt to process. 122 * 123 * Stack is set up to allow sigcode stored at top to call routine, 124 * followed by call to sigreturn routine below. After sigreturn 125 * resets the signal mask, the stack, and the frame pointer, it 126 * returns to the user specified pc, psl. 
 */
#ifdef COMPAT_43
/*
 * Deliver a signal using the historic (4.3BSD-era) osigframe layout.
 * Expects the proc lock and psp->ps_mtx held on entry; both are dropped
 * around the copyout to user space and reacquired before returning.
 */
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		/* Place the frame at the top of the alternate signal stack. */
		fp = (struct osigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
		sf.sf_addr = 0;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	/* Drop locks across the user-space copyout below. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	/* %gs is not in the trap frame; read it from the hardware. */
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
		/* The user stack is unusable; kill the process. */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	if (PROC_HAS_SHP(p)) {
		regs->tf_eip = PROC_SIGCODE(p) + szsigcode -
		    szosigcode;
	} else {
		/* a.out sysentvec does not use shared page */
		regs->tf_eip = PROC_PS_STRINGS(p) - szosigcode;
	}
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * Deliver a signal using the FreeBSD 4.x sigframe/ucontext layout.
 * Locking contract matches osendsig(): proc lock and ps_mtx held on
 * entry, dropped around the copyout, reacquired before return.
 */
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct freebsd4_sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	/* The trap frame is copied wholesale, starting at mc_fs. */
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
	    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	bzero(sf.sf_uc.uc_mcontext.__spare__,
	    sizeof(sf.sf_uc.uc_mcontext.__spare__));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct freebsd4_sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct freebsd4_sigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct freebsd4_sigframe *)regs->tf_esp - 1;

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		/* The user stack is unusable; kill the process. */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PROC_SIGCODE(p) + szsigcode -
	    szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif	/* COMPAT_FREEBSD4 */

/*
 * Deliver a signal using the native sigframe layout, dispatching to the
 * COMPAT_FREEBSD4/COMPAT_43 variants when the sigacts say the handler
 * was registered through one of the old interfaces.  Also saves any
 * extended FPU (xsave) state beyond the legacy area when present.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;
	char *xfpusave;
	size_t xfpusave_len;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Stage the extended FPU state in a stack buffer, if any. */
	if (cpu_max_ext_state_size > sizeof(union savefpu) && use_xsave) {
		xfpusave_len = cpu_max_ext_state_size - sizeof(union savefpu);
		xfpusave = __builtin_alloca(xfpusave_len);
	} else {
		xfpusave_len = 0;
		xfpusave = NULL;
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
	fpstate_drop(td);
	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	bzero(sf.sf_uc.uc_mcontext.mc_spare2,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare2));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (char *)td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		/* -128 skips the red zone below the interrupted frame. */
		sp = (char *)regs->tf_esp - 128;
	if (xfpusave != NULL) {
		/* Extended FPU state goes above the frame, 64-byte aligned. */
		sp -= xfpusave_len;
		sp = (char *)((unsigned int)sp & ~0x3F);
		sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
	}
	sp -= sizeof(struct sigframe);

	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    (xfpusave != NULL && copyout(xfpusave,
	    (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
	    != 0)) {
		/* The user stack is unusable; kill the process. */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PROC_SIGCODE(p);
	if (regs->tf_eip == 0)
		regs->tf_eip = PROC_PS_STRINGS(p) - szsigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
#ifdef COMPAT_43
int
osigreturn(struct thread *td, struct osigreturn_args *uap)
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	int eflags, error;
	ksiginfo_t ksi;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;
	regs->tf_trapno = T_RESERVED;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
/*
 * FreeBSD 4.x-layout counterpart of sys_sigreturn(); same validation
 * rules (EFL_SECURE/CS_SECURE, vm86 handling) for the old ucontext.
 */
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{
	struct freebsd4_ucontext uc;
	struct trapframe *regs;
	struct freebsd4_ucontext *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf(
			    "pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}
	regs->tf_trapno = T_RESERVED;

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}
#endif	/* COMPAT_FREEBSD4 */

/*
 * Native sigreturn(2): validate and restore the ucontext saved by
 * sendsig(), including any extended FPU state the frame carried, and
 * reload the signal mask.  Returns EJUSTRETURN so the syscall return
 * path leaves the restored trap frame untouched.
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	struct proc *p;
	struct trapframe *regs;
	ucontext_t *ucp;
	char *xfpustate;
	size_t xfpustate_len;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	p = td->td_proc;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
		uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
		    td->td_name, ucp->uc_mcontext.mc_flags);
		return (EINVAL);
	}
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		if (!EFL_SECURE(eflags, regs->tf_eflags)) {
			uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
			/* Length is user-supplied; bound it before alloca. */
			xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
			if (xfpustate_len > cpu_max_ext_state_size -
			    sizeof(union savefpu)) {
				uprintf(
			    "pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
				    p->p_pid, td->td_name, xfpustate_len);
				return (EINVAL);
			}
			xfpustate = __builtin_alloca(xfpustate_len);
			error = copyin(
			    (const void *)uc.uc_mcontext.mc_xfpustate,
			    xfpustate, xfpustate_len);
			if (error != 0) {
				uprintf(
	"pid %d (%s): sigreturn copying xfpustate failed\n",
				    p->p_pid, td->td_name);
				return (error);
			}
		} else {
			xfpustate = NULL;
			xfpustate_len = 0;
		}
		ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate,
		    xfpustate_len);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}
	regs->tf_trapno = T_RESERVED;

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
}

/*
 * Reset the hardware debug registers if they were in use.
 * They won't have any meaning for the newly exec'd process.
 */
void
x86_clear_dbregs(struct pcb *pcb)
{
	if ((pcb->pcb_flags & PCB_DBREGS) == 0)
		return;

	pcb->pcb_dr0 = 0;
	pcb->pcb_dr1 = 0;
	pcb->pcb_dr2 = 0;
	pcb->pcb_dr3 = 0;
	pcb->pcb_dr6 = 0;
	pcb->pcb_dr7 = 0;

	if (pcb == curpcb) {
		/*
		 * Clear the debug registers on the running
		 * CPU, otherwise they will end up affecting
		 * the next process we switch to.
		 */
		reset_dbregs();
	}
	pcb->pcb_flags &= ~PCB_DBREGS;
}

#ifdef COMPAT_43
/*
 * Install an LDT call gate descriptor covering the lcall $7,$0 syscall
 * trampoline placed just below the process's ps_strings.
 */
static void
setup_priv_lcall_gate(struct proc *p)
{
	struct i386_ldt_args uap;
	union descriptor desc;
	u_int lcall_addr;

	bzero(&uap, sizeof(uap));
	uap.start = 0;
	uap.num = 1;
	lcall_addr = p->p_sysent->sv_psstrings - sz_lcall_tramp;
	bzero(&desc, sizeof(desc));
	desc.sd.sd_type = SDT_MEMERA;
	desc.sd.sd_dpl = SEL_UPL;
	desc.sd.sd_p = 1;
	desc.sd.sd_def32 = 1;
	desc.sd.sd_gran = 1;
	desc.sd.sd_lolimit = 0xffff;
	desc.sd.sd_hilimit = 0xf;
	desc.sd.sd_lobase = lcall_addr;
	desc.sd.sd_hibase = lcall_addr >> 24;
	i386_set_ldt(curthread, &uap, &desc);
}
#endif

/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *regs;
	struct pcb *pcb;
	register_t saved_eflags;

	regs = td->td_frame;
	pcb = td->td_pcb;

	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	/* user_ldt_free() drops dt_lock itself when it runs. */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt != NULL)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

#ifdef COMPAT_43
	if (td->td_proc->p_sysent->sv_psstrings !=
	    elf32_freebsd_sysvec.sv_psstrings)
		setup_priv_lcall_gate(td->td_proc);
#endif

	/*
	 * Reset the fs and gs bases.  The values from the old address
	 * space do not make sense for the new program.  In particular,
	 * gsbase might be the TLS base for the old program but the new
	 * program has no TLS now.
	 */
	set_fsbase(td, 0);
	set_gsbase(td, 0);

	/* Make sure edx is 0x0 on entry. Linux binaries depend on it. */
	saved_eflags = regs->tf_eflags & PSL_T;
	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = imgp->entry_addr;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | saved_eflags;
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = (register_t)imgp->ps_strings;

	x86_clear_dbregs(pcb);

	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);
}

/*
 * Fill a struct reg for the debugger/ptrace from a thread's trap frame;
 * %gs comes from the pcb rather than the frame.
 */
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	pcb = td->td_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (fill_frame_regs(tp, regs));
}

/*
 * Copy an i386 trap frame into a struct reg.  r_err and r_trapno are
 * cleared; the frame's values are not exported.
 */
int
fill_frame_regs(struct trapframe *tp, struct reg *regs)
{

	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	regs->r_err = 0;
	regs->r_trapno = 0;
	return (0);
}

/*
 * Install debugger-supplied registers into a thread's trap frame after
 * validating eflags and %cs with the same checks sigreturn uses.
 */
int
set_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	pcb = td->td_pcb;
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

/*
 * Export a thread's FPU registers in the legacy save87 format,
 * converting from the FXSR (sv_xmm) layout when the CPU uses it.
 */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

	KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
	    P_SHOULDSTOP(td->td_proc),
	    ("not suspended thread %p", td));
	npxgetregs(td);
	if (cpu_fxsr)
		npx_fill_fpregs_xmm(&get_pcb_user_save_td(td)->sv_xmm,
		    (struct save87 *)fpregs);
	else
		bcopy(&get_pcb_user_save_td(td)->sv_87, fpregs,
		    sizeof(*fpregs));
	return (0);
}

/*
 * Install FPU registers supplied in the legacy save87 format, converting
 * to the FXSR layout when needed.  Runs in a critical section so the
 * save area isn't switched out from under us.
 */
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

	critical_enter();
	if (cpu_fxsr)
		npx_set_fpregs_xmm((struct save87 *)fpregs,
		    &get_pcb_user_save_td(td)->sv_xmm);
	else
		bcopy(fpregs, &get_pcb_user_save_td(td)->sv_87,
		    sizeof(*fpregs));
	npxuserinited(td);
	critical_exit();
	return (0);
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct trapframe *tp;
	struct segment_descriptor *sdp;

	tp = td->td_frame;

	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_esp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_gs = td->td_pcb->pcb_gs;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_es = tp->tf_es;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_edi = tp->tf_edi;
	mcp->mc_esi = tp->tf_esi;
	mcp->mc_ebp = tp->tf_ebp;
	mcp->mc_isp = tp->tf_isp;
	mcp->mc_eflags = tp->tf_eflags;
	if (flags & GET_MC_CLEAR_RET) {
		/* Present the context as if the syscall returned 0. */
		mcp->mc_eax = 0;
		mcp->mc_edx = 0;
		mcp->mc_eflags &= ~PSL_C;
	} else {
		mcp->mc_eax = tp->tf_eax;
		mcp->mc_edx = tp->tf_edx;
	}
	mcp->mc_ebx = tp->tf_ebx;
	mcp->mc_ecx = tp->tf_ecx;
	mcp->mc_eip = tp->tf_eip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_esp = tp->tf_esp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp, NULL, 0);
	sdp = &td->td_pcb->pcb_fsd;
	mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	mcp->mc_flags = 0;
	mcp->mc_xfpustate = 0;
	mcp->mc_xfpustate_len = 0;
	bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
	return (0);
}
1155 1156 /* 1157 * Set machine context. 1158 * 1159 * However, we don't set any but the user modifiable flags, and we won't 1160 * touch the cs selector. 1161 */ 1162 int 1163 set_mcontext(struct thread *td, mcontext_t *mcp) 1164 { 1165 struct trapframe *tp; 1166 char *xfpustate; 1167 int eflags, ret; 1168 1169 tp = td->td_frame; 1170 if (mcp->mc_len != sizeof(*mcp) || 1171 (mcp->mc_flags & ~_MC_FLAG_MASK) != 0) 1172 return (EINVAL); 1173 eflags = (mcp->mc_eflags & PSL_USERCHANGE) | 1174 (tp->tf_eflags & ~PSL_USERCHANGE); 1175 if (mcp->mc_flags & _MC_HASFPXSTATE) { 1176 if (mcp->mc_xfpustate_len > cpu_max_ext_state_size - 1177 sizeof(union savefpu)) 1178 return (EINVAL); 1179 xfpustate = __builtin_alloca(mcp->mc_xfpustate_len); 1180 ret = copyin((void *)mcp->mc_xfpustate, xfpustate, 1181 mcp->mc_xfpustate_len); 1182 if (ret != 0) 1183 return (ret); 1184 } else 1185 xfpustate = NULL; 1186 ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len); 1187 if (ret != 0) 1188 return (ret); 1189 tp->tf_fs = mcp->mc_fs; 1190 tp->tf_es = mcp->mc_es; 1191 tp->tf_ds = mcp->mc_ds; 1192 tp->tf_edi = mcp->mc_edi; 1193 tp->tf_esi = mcp->mc_esi; 1194 tp->tf_ebp = mcp->mc_ebp; 1195 tp->tf_ebx = mcp->mc_ebx; 1196 tp->tf_edx = mcp->mc_edx; 1197 tp->tf_ecx = mcp->mc_ecx; 1198 tp->tf_eax = mcp->mc_eax; 1199 tp->tf_eip = mcp->mc_eip; 1200 tp->tf_eflags = eflags; 1201 tp->tf_esp = mcp->mc_esp; 1202 tp->tf_ss = mcp->mc_ss; 1203 td->td_pcb->pcb_gs = mcp->mc_gs; 1204 return (0); 1205 } 1206 1207 static void 1208 get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave, 1209 size_t xfpusave_len) 1210 { 1211 size_t max_len, len; 1212 1213 mcp->mc_ownedfp = npxgetregs(td); 1214 bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0], 1215 sizeof(mcp->mc_fpstate)); 1216 mcp->mc_fpformat = npxformat(); 1217 if (!use_xsave || xfpusave_len == 0) 1218 return; 1219 max_len = cpu_max_ext_state_size - sizeof(union savefpu); 1220 len = xfpusave_len; 1221 if (len > max_len) { 1222 len = 
max_len; 1223 bzero(xfpusave + max_len, len - max_len); 1224 } 1225 mcp->mc_flags |= _MC_HASFPXSTATE; 1226 mcp->mc_xfpustate_len = len; 1227 bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len); 1228 } 1229 1230 static int 1231 set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate, 1232 size_t xfpustate_len) 1233 { 1234 int error; 1235 1236 if (mcp->mc_fpformat == _MC_FPFMT_NODEV) 1237 return (0); 1238 else if (mcp->mc_fpformat != _MC_FPFMT_387 && 1239 mcp->mc_fpformat != _MC_FPFMT_XMM) 1240 return (EINVAL); 1241 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) { 1242 /* We don't care what state is left in the FPU or PCB. */ 1243 fpstate_drop(td); 1244 error = 0; 1245 } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU || 1246 mcp->mc_ownedfp == _MC_FPOWNED_PCB) { 1247 error = npxsetregs(td, (union savefpu *)&mcp->mc_fpstate, 1248 xfpustate, xfpustate_len); 1249 } else 1250 return (EINVAL); 1251 return (error); 1252 } 1253 1254 static void 1255 fpstate_drop(struct thread *td) 1256 { 1257 1258 KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu")); 1259 critical_enter(); 1260 if (PCPU_GET(fpcurthread) == td) 1261 npxdrop(); 1262 /* 1263 * XXX force a full drop of the npx. The above only drops it if we 1264 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case. 1265 * 1266 * XXX I don't much like npxgetregs()'s semantics of doing a full 1267 * drop. Dropping only to the pcb matches fnsave's behaviour. 1268 * We only need to drop to !PCB_INITDONE in sendsig(). But 1269 * sendsig() is the only caller of npxgetregs()... perhaps we just 1270 * have too many layers. 
	 */
	curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
	    PCB_NPXUSERINITDONE);
	critical_exit();
}

/*
 * Read the x86 debug registers into 'dbregs'.  With td == NULL the
 * live hardware registers are read; otherwise the copies saved in
 * td's pcb are reported.  dr4/dr5 are reserved and always read as 0.
 * Always returns 0.
 */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	dbregs->dr[4] = 0;
	dbregs->dr[5] = 0;
	return (0);
}

/*
 * Write the debug registers from 'dbregs'.  With td == NULL they are
 * loaded directly into the hardware; otherwise they are validated
 * (legal dr7 encodings, breakpoint addresses within user space) and
 * stored in td's pcb with PCB_DBREGS set.  Returns 0 or EINVAL.
 */
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set. Specifically,
		 * check for undefined settings. Setting these bit patterns
		 * result in undefined behaviour and can lead to an unexpected
		 * TRCTRAP.
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
		}

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space. If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled). Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space. Return 0, otherwise.
1386 */ 1387 int 1388 user_dbreg_trap(register_t dr6) 1389 { 1390 u_int32_t dr7; 1391 u_int32_t bp; /* breakpoint bits extracted from dr6 */ 1392 int nbp; /* number of breakpoints that triggered */ 1393 caddr_t addr[4]; /* breakpoint addresses */ 1394 int i; 1395 1396 bp = dr6 & DBREG_DR6_BMASK; 1397 if (bp == 0) { 1398 /* 1399 * None of the breakpoint bits are set meaning this 1400 * trap was not caused by any of the debug registers 1401 */ 1402 return (0); 1403 } 1404 1405 dr7 = rdr7(); 1406 if ((dr7 & 0x000000ff) == 0) { 1407 /* 1408 * all GE and LE bits in the dr7 register are zero, 1409 * thus the trap couldn't have been caused by the 1410 * hardware debug registers 1411 */ 1412 return (0); 1413 } 1414 1415 nbp = 0; 1416 1417 /* 1418 * at least one of the breakpoints were hit, check to see 1419 * which ones and if any of them are user space addresses 1420 */ 1421 1422 if (bp & 0x01) { 1423 addr[nbp++] = (caddr_t)rdr0(); 1424 } 1425 if (bp & 0x02) { 1426 addr[nbp++] = (caddr_t)rdr1(); 1427 } 1428 if (bp & 0x04) { 1429 addr[nbp++] = (caddr_t)rdr2(); 1430 } 1431 if (bp & 0x08) { 1432 addr[nbp++] = (caddr_t)rdr3(); 1433 } 1434 1435 for (i = 0; i < nbp; i++) { 1436 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) { 1437 /* 1438 * addr[i] is in user space 1439 */ 1440 return (nbp); 1441 } 1442 } 1443 1444 /* 1445 * None of the breakpoints are in user space. 1446 */ 1447 return (0); 1448 } 1449