/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
30 * 31 * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $ 32 * $DragonFly: src/sys/kern/sys_process.c,v 1.30 2007/02/19 01:14:23 corecode Exp $ 33 */ 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/sysproto.h> 38 #include <sys/proc.h> 39 #include <sys/priv.h> 40 #include <sys/vnode.h> 41 #include <sys/ptrace.h> 42 #include <sys/reg.h> 43 #include <sys/lock.h> 44 45 #include <vm/vm.h> 46 #include <vm/pmap.h> 47 #include <vm/vm_map.h> 48 #include <vm/vm_page.h> 49 50 #include <sys/user.h> 51 #include <vfs/procfs/procfs.h> 52 53 #include <sys/thread2.h> 54 #include <sys/spinlock2.h> 55 56 /* use the equivalent procfs code */ 57 #if 0 58 static int 59 pread (struct proc *procp, unsigned int addr, unsigned int *retval) { 60 int rv; 61 vm_map_t map, tmap; 62 vm_object_t object; 63 vm_offset_t kva = 0; 64 int page_offset; /* offset into page */ 65 vm_offset_t pageno; /* page number */ 66 vm_map_entry_t out_entry; 67 vm_prot_t out_prot; 68 boolean_t wired; 69 vm_pindex_t pindex; 70 71 /* Map page into kernel space */ 72 73 map = &procp->p_vmspace->vm_map; 74 75 page_offset = addr - trunc_page(addr); 76 pageno = trunc_page(addr); 77 78 tmap = map; 79 rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry, 80 &object, &pindex, &out_prot, &wired); 81 82 if (rv != KERN_SUCCESS) 83 return EINVAL; 84 85 vm_map_lookup_done (tmap, out_entry, 0); 86 87 /* Find space in kernel_map for the page we're interested in */ 88 rv = vm_map_find (&kernel_map, object, IDX_TO_OFF(pindex), 89 &kva, 90 PAGE_SIZE, PAGE_SIZE, 91 0, VM_MAPTYPE_NORMAL, 92 VM_PROT_ALL, VM_PROT_ALL, 93 0); 94 95 if (!rv) { 96 vm_object_reference XXX (object); 97 98 rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0); 99 if (!rv) { 100 *retval = 0; 101 bcopy ((caddr_t)kva + page_offset, 102 retval, sizeof *retval); 103 } 104 vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE); 105 } 106 107 return rv; 108 } 109 110 static int 111 pwrite (struct proc *procp, 
unsigned int addr, unsigned int datum) { 112 int rv; 113 vm_map_t map, tmap; 114 vm_object_t object; 115 vm_offset_t kva = 0; 116 int page_offset; /* offset into page */ 117 vm_offset_t pageno; /* page number */ 118 vm_map_entry_t out_entry; 119 vm_prot_t out_prot; 120 boolean_t wired; 121 vm_pindex_t pindex; 122 boolean_t fix_prot = 0; 123 124 /* Map page into kernel space */ 125 126 map = &procp->p_vmspace->vm_map; 127 128 page_offset = addr - trunc_page(addr); 129 pageno = trunc_page(addr); 130 131 /* 132 * Check the permissions for the area we're interested in. 133 */ 134 135 if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE, 136 VM_PROT_WRITE, FALSE) == FALSE) { 137 /* 138 * If the page was not writable, we make it so. 139 * XXX It is possible a page may *not* be read/executable, 140 * if a process changes that! 141 */ 142 fix_prot = 1; 143 /* The page isn't writable, so let's try making it so... */ 144 if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE, 145 VM_PROT_ALL, 0)) != KERN_SUCCESS) 146 return EFAULT; /* I guess... */ 147 } 148 149 /* 150 * Now we need to get the page. out_entry, out_prot, wired, and 151 * single_use aren't used. One would think the vm code would be 152 * a *bit* nicer... We use tmap because vm_map_lookup() can 153 * change the map argument. 154 */ 155 156 tmap = map; 157 rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry, 158 &object, &pindex, &out_prot, &wired); 159 if (rv != KERN_SUCCESS) 160 return EINVAL; 161 162 /* 163 * Okay, we've got the page. Let's release tmap. 164 */ 165 vm_map_lookup_done (tmap, out_entry, 0); 166 167 /* 168 * Fault the page in... 
169 */ 170 rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE); 171 if (rv != KERN_SUCCESS) 172 return EFAULT; 173 174 /* Find space in kernel_map for the page we're interested in */ 175 rv = vm_map_find (&kernel_map, object, IDX_TO_OFF(pindex), 176 &kva, 177 PAGE_SIZE, PAGE_SIZE, 178 0, VM_MAPTYPE_NORMAL, 179 VM_PROT_ALL, VM_PROT_ALL, 180 0); 181 if (!rv) { 182 vm_object_reference XXX (object); 183 184 rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0); 185 if (!rv) { 186 bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum); 187 } 188 vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE); 189 } 190 191 if (fix_prot) 192 vm_map_protect (map, pageno, pageno + PAGE_SIZE, 193 VM_PROT_READ|VM_PROT_EXECUTE, 0); 194 return rv; 195 } 196 #endif 197 198 /* 199 * Process debugging system call. 200 * 201 * MPALMOSTSAFE 202 */ 203 int 204 sys_ptrace(struct ptrace_args *uap) 205 { 206 struct proc *p = curproc; 207 208 /* 209 * XXX this obfuscation is to reduce stack usage, but the register 210 * structs may be too large to put on the stack anyway. 
211 */ 212 union { 213 struct ptrace_io_desc piod; 214 struct dbreg dbreg; 215 struct fpreg fpreg; 216 struct reg reg; 217 } r; 218 void *addr; 219 int error = 0; 220 221 addr = &r; 222 switch (uap->req) { 223 case PT_GETREGS: 224 case PT_GETFPREGS: 225 #ifdef PT_GETDBREGS 226 case PT_GETDBREGS: 227 #endif 228 break; 229 case PT_SETREGS: 230 error = copyin(uap->addr, &r.reg, sizeof r.reg); 231 break; 232 case PT_SETFPREGS: 233 error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg); 234 break; 235 #ifdef PT_SETDBREGS 236 case PT_SETDBREGS: 237 error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg); 238 break; 239 #endif 240 case PT_IO: 241 error = copyin(uap->addr, &r.piod, sizeof r.piod); 242 break; 243 default: 244 addr = uap->addr; 245 } 246 if (error) 247 return (error); 248 249 error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data, 250 &uap->sysmsg_result); 251 if (error) 252 return (error); 253 254 switch (uap->req) { 255 case PT_IO: 256 (void)copyout(&r.piod, uap->addr, sizeof r.piod); 257 break; 258 case PT_GETREGS: 259 error = copyout(&r.reg, uap->addr, sizeof r.reg); 260 break; 261 case PT_GETFPREGS: 262 error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg); 263 break; 264 #ifdef PT_GETDBREGS 265 case PT_GETDBREGS: 266 error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg); 267 break; 268 #endif 269 } 270 271 return (error); 272 } 273 274 int 275 kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr, 276 int data, int *res) 277 { 278 struct proc *p, *pp; 279 struct lwp *lp; 280 struct iovec iov; 281 struct uio uio; 282 struct ptrace_io_desc *piod; 283 int error = 0; 284 int write, tmp; 285 int t; 286 287 write = 0; 288 if (req == PT_TRACE_ME) { 289 p = curp; 290 PHOLD(p); 291 } else { 292 if ((p = pfind(pid)) == NULL) 293 return ESRCH; 294 } 295 if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) { 296 PRELE(p); 297 return (ESRCH); 298 } 299 if (p->p_flags & P_SYSTEM) { 300 PRELE(p); 301 return EINVAL; 302 } 303 304 lwkt_gettoken(&p->p_token); 305 /* Can't 
trace a process that's currently exec'ing. */ 306 if ((p->p_flags & P_INEXEC) != 0) { 307 lwkt_reltoken(&p->p_token); 308 PRELE(p); 309 return EAGAIN; 310 } 311 312 /* 313 * Permissions check 314 */ 315 switch (req) { 316 case PT_TRACE_ME: 317 /* Always legal. */ 318 break; 319 320 case PT_ATTACH: 321 /* Self */ 322 if (p->p_pid == curp->p_pid) { 323 lwkt_reltoken(&p->p_token); 324 PRELE(p); 325 return EINVAL; 326 } 327 328 /* Already traced */ 329 if (p->p_flags & P_TRACED) { 330 lwkt_reltoken(&p->p_token); 331 PRELE(p); 332 return EBUSY; 333 } 334 335 if (curp->p_flags & P_TRACED) 336 for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) 337 if (pp == p) { 338 lwkt_reltoken(&p->p_token); 339 PRELE(p); 340 return (EINVAL); 341 } 342 343 /* not owned by you, has done setuid (unless you're root) */ 344 if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) || 345 (p->p_flags & P_SUGID)) { 346 if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) { 347 lwkt_reltoken(&p->p_token); 348 PRELE(p); 349 return error; 350 } 351 } 352 353 /* can't trace init when securelevel > 0 */ 354 if (securelevel > 0 && p->p_pid == 1) { 355 lwkt_reltoken(&p->p_token); 356 PRELE(p); 357 return EPERM; 358 } 359 360 /* OK */ 361 break; 362 363 case PT_READ_I: 364 case PT_READ_D: 365 case PT_WRITE_I: 366 case PT_WRITE_D: 367 case PT_IO: 368 case PT_CONTINUE: 369 case PT_KILL: 370 case PT_STEP: 371 case PT_DETACH: 372 #ifdef PT_GETREGS 373 case PT_GETREGS: 374 #endif 375 #ifdef PT_SETREGS 376 case PT_SETREGS: 377 #endif 378 #ifdef PT_GETFPREGS 379 case PT_GETFPREGS: 380 #endif 381 #ifdef PT_SETFPREGS 382 case PT_SETFPREGS: 383 #endif 384 #ifdef PT_GETDBREGS 385 case PT_GETDBREGS: 386 #endif 387 #ifdef PT_SETDBREGS 388 case PT_SETDBREGS: 389 #endif 390 /* not being traced... 
*/ 391 if ((p->p_flags & P_TRACED) == 0) { 392 lwkt_reltoken(&p->p_token); 393 PRELE(p); 394 return EPERM; 395 } 396 397 /* not being traced by YOU */ 398 if (p->p_pptr != curp) { 399 lwkt_reltoken(&p->p_token); 400 PRELE(p); 401 return EBUSY; 402 } 403 404 /* not currently stopped */ 405 if (p->p_stat != SSTOP || 406 (p->p_flags & P_WAITED) == 0) { 407 lwkt_reltoken(&p->p_token); 408 PRELE(p); 409 return EBUSY; 410 } 411 412 /* OK */ 413 break; 414 415 default: 416 lwkt_reltoken(&p->p_token); 417 PRELE(p); 418 return EINVAL; 419 } 420 421 /* XXX lwp */ 422 lp = FIRST_LWP_IN_PROC(p); 423 #ifdef FIX_SSTEP 424 /* 425 * Single step fixup ala procfs 426 */ 427 FIX_SSTEP(lp); 428 #endif 429 430 /* 431 * Actually do the requests 432 */ 433 434 *res = 0; 435 436 switch (req) { 437 case PT_TRACE_ME: 438 /* set my trace flag and "owner" so it can read/write me */ 439 p->p_flags |= P_TRACED; 440 p->p_oppid = p->p_pptr->p_pid; 441 lwkt_reltoken(&p->p_token); 442 PRELE(p); 443 return 0; 444 445 case PT_ATTACH: 446 /* security check done above */ 447 p->p_flags |= P_TRACED; 448 p->p_oppid = p->p_pptr->p_pid; 449 proc_reparent(p, curp); 450 data = SIGSTOP; 451 goto sendsig; /* in PT_CONTINUE below */ 452 453 case PT_STEP: 454 case PT_CONTINUE: 455 case PT_DETACH: 456 /* Zero means do not send any signal */ 457 if (data < 0 || data > _SIG_MAXSIG) { 458 lwkt_reltoken(&p->p_token); 459 PRELE(p); 460 return EINVAL; 461 } 462 463 LWPHOLD(lp); 464 465 if (req == PT_STEP) { 466 if ((error = ptrace_single_step (lp))) { 467 LWPRELE(lp); 468 lwkt_reltoken(&p->p_token); 469 PRELE(p); 470 return error; 471 } 472 } 473 474 if (addr != (void *)1) { 475 if ((error = ptrace_set_pc (lp, 476 (u_long)(uintfptr_t)addr))) { 477 LWPRELE(lp); 478 lwkt_reltoken(&p->p_token); 479 PRELE(p); 480 return error; 481 } 482 } 483 LWPRELE(lp); 484 485 if (req == PT_DETACH) { 486 /* reset process parent */ 487 if (p->p_oppid != p->p_pptr->p_pid) { 488 struct proc *pp; 489 490 pp = pfind(p->p_oppid); 491 if (pp) 
{ 492 proc_reparent(p, pp); 493 PRELE(pp); 494 } 495 } 496 497 p->p_flags &= ~(P_TRACED | P_WAITED); 498 p->p_oppid = 0; 499 500 /* should we send SIGCHLD? */ 501 } 502 503 sendsig: 504 /* 505 * Deliver or queue signal. If the process is stopped 506 * force it to be SACTIVE again. 507 */ 508 crit_enter(); 509 if (p->p_stat == SSTOP) { 510 p->p_xstat = data; 511 proc_unstop(p); 512 } else if (data) { 513 ksignal(p, data); 514 } 515 crit_exit(); 516 lwkt_reltoken(&p->p_token); 517 PRELE(p); 518 return 0; 519 520 case PT_WRITE_I: 521 case PT_WRITE_D: 522 write = 1; 523 /* fallthrough */ 524 case PT_READ_I: 525 case PT_READ_D: 526 /* 527 * NOTE! uio_offset represents the offset in the target 528 * process. The iov is in the current process (the guy 529 * making the ptrace call) so uio_td must be the current 530 * process (though for a SYSSPACE transfer it doesn't 531 * really matter). 532 */ 533 tmp = 0; 534 /* write = 0 set above */ 535 iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp; 536 iov.iov_len = sizeof(int); 537 uio.uio_iov = &iov; 538 uio.uio_iovcnt = 1; 539 uio.uio_offset = (off_t)(uintptr_t)addr; 540 uio.uio_resid = sizeof(int); 541 uio.uio_segflg = UIO_SYSSPACE; 542 uio.uio_rw = write ? UIO_WRITE : UIO_READ; 543 uio.uio_td = curthread; 544 error = procfs_domem(curp, lp, NULL, &uio); 545 if (uio.uio_resid != 0) { 546 /* 547 * XXX procfs_domem() doesn't currently return ENOSPC, 548 * so I think write() can bogusly return 0. 549 * XXX what happens for short writes? We don't want 550 * to write partial data. 551 * XXX procfs_domem() returns EPERM for other invalid 552 * addresses. Convert this to EINVAL. Does this 553 * clobber returns of EPERM for other reasons? 554 */ 555 if (error == 0 || error == ENOSPC || error == EPERM) 556 error = EINVAL; /* EOF */ 557 } 558 if (!write) 559 *res = tmp; 560 lwkt_reltoken(&p->p_token); 561 PRELE(p); 562 return (error); 563 564 case PT_IO: 565 /* 566 * NOTE! 
uio_offset represents the offset in the target 567 * process. The iov is in the current process (the guy 568 * making the ptrace call) so uio_td must be the current 569 * process. 570 */ 571 piod = addr; 572 iov.iov_base = piod->piod_addr; 573 iov.iov_len = piod->piod_len; 574 uio.uio_iov = &iov; 575 uio.uio_iovcnt = 1; 576 uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs; 577 uio.uio_resid = piod->piod_len; 578 uio.uio_segflg = UIO_USERSPACE; 579 uio.uio_td = curthread; 580 switch (piod->piod_op) { 581 case PIOD_READ_D: 582 case PIOD_READ_I: 583 uio.uio_rw = UIO_READ; 584 break; 585 case PIOD_WRITE_D: 586 case PIOD_WRITE_I: 587 uio.uio_rw = UIO_WRITE; 588 break; 589 default: 590 lwkt_reltoken(&p->p_token); 591 PRELE(p); 592 return (EINVAL); 593 } 594 error = procfs_domem(curp, lp, NULL, &uio); 595 piod->piod_len -= uio.uio_resid; 596 lwkt_reltoken(&p->p_token); 597 PRELE(p); 598 return (error); 599 600 case PT_KILL: 601 data = SIGKILL; 602 goto sendsig; /* in PT_CONTINUE above */ 603 604 #ifdef PT_SETREGS 605 case PT_SETREGS: 606 write = 1; 607 /* fallthrough */ 608 #endif /* PT_SETREGS */ 609 #ifdef PT_GETREGS 610 case PT_GETREGS: 611 /* write = 0 above */ 612 #endif /* PT_SETREGS */ 613 #if defined(PT_SETREGS) || defined(PT_GETREGS) 614 if (!procfs_validregs(lp)) { 615 lwkt_reltoken(&p->p_token); 616 PRELE(p); 617 return EINVAL; 618 } else { 619 iov.iov_base = addr; 620 iov.iov_len = sizeof(struct reg); 621 uio.uio_iov = &iov; 622 uio.uio_iovcnt = 1; 623 uio.uio_offset = 0; 624 uio.uio_resid = sizeof(struct reg); 625 uio.uio_segflg = UIO_SYSSPACE; 626 uio.uio_rw = write ? 
UIO_WRITE : UIO_READ; 627 uio.uio_td = curthread; 628 t = procfs_doregs(curp, lp, NULL, &uio); 629 lwkt_reltoken(&p->p_token); 630 PRELE(p); 631 return t; 632 } 633 #endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */ 634 635 #ifdef PT_SETFPREGS 636 case PT_SETFPREGS: 637 write = 1; 638 /* fallthrough */ 639 #endif /* PT_SETFPREGS */ 640 #ifdef PT_GETFPREGS 641 case PT_GETFPREGS: 642 /* write = 0 above */ 643 #endif /* PT_SETFPREGS */ 644 #if defined(PT_SETFPREGS) || defined(PT_GETFPREGS) 645 if (!procfs_validfpregs(lp)) { 646 lwkt_reltoken(&p->p_token); 647 PRELE(p); 648 return EINVAL; 649 } else { 650 iov.iov_base = addr; 651 iov.iov_len = sizeof(struct fpreg); 652 uio.uio_iov = &iov; 653 uio.uio_iovcnt = 1; 654 uio.uio_offset = 0; 655 uio.uio_resid = sizeof(struct fpreg); 656 uio.uio_segflg = UIO_SYSSPACE; 657 uio.uio_rw = write ? UIO_WRITE : UIO_READ; 658 uio.uio_td = curthread; 659 t = procfs_dofpregs(curp, lp, NULL, &uio); 660 lwkt_reltoken(&p->p_token); 661 PRELE(p); 662 return t; 663 } 664 #endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */ 665 666 #ifdef PT_SETDBREGS 667 case PT_SETDBREGS: 668 write = 1; 669 /* fallthrough */ 670 #endif /* PT_SETDBREGS */ 671 #ifdef PT_GETDBREGS 672 case PT_GETDBREGS: 673 /* write = 0 above */ 674 #endif /* PT_SETDBREGS */ 675 #if defined(PT_SETDBREGS) || defined(PT_GETDBREGS) 676 if (!procfs_validdbregs(lp)) { 677 lwkt_reltoken(&p->p_token); 678 PRELE(p); 679 return EINVAL; 680 } else { 681 iov.iov_base = addr; 682 iov.iov_len = sizeof(struct dbreg); 683 uio.uio_iov = &iov; 684 uio.uio_iovcnt = 1; 685 uio.uio_offset = 0; 686 uio.uio_resid = sizeof(struct dbreg); 687 uio.uio_segflg = UIO_SYSSPACE; 688 uio.uio_rw = write ? 
UIO_WRITE : UIO_READ; 689 uio.uio_td = curthread; 690 t = procfs_dodbregs(curp, lp, NULL, &uio); 691 lwkt_reltoken(&p->p_token); 692 PRELE(p); 693 return t; 694 } 695 #endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */ 696 697 default: 698 break; 699 } 700 701 lwkt_reltoken(&p->p_token); 702 PRELE(p); 703 704 return 0; 705 } 706 707 int 708 trace_req(struct proc *p) 709 { 710 return 1; 711 } 712 713 /* 714 * stopevent() 715 * 716 * Stop a process because of a procfs event. Stay stopped until p->p_step 717 * is cleared (cleared by PIOCCONT in procfs). 718 * 719 * MPSAFE 720 */ 721 void 722 stopevent(struct proc *p, unsigned int event, unsigned int val) 723 { 724 /* 725 * Set event info. Recheck p_stops in case we are 726 * racing a close() on procfs. 727 */ 728 spin_lock(&p->p_spin); 729 if ((p->p_stops & event) == 0) { 730 spin_unlock(&p->p_spin); 731 return; 732 } 733 p->p_xstat = val; 734 p->p_stype = event; 735 p->p_step = 1; 736 tsleep_interlock(&p->p_step, 0); 737 spin_unlock(&p->p_spin); 738 739 /* 740 * Wakeup any PIOCWAITing procs and wait for p_step to 741 * be cleared. 742 */ 743 for (;;) { 744 wakeup(&p->p_stype); 745 tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0); 746 spin_lock(&p->p_spin); 747 if (p->p_step == 0) { 748 spin_unlock(&p->p_spin); 749 break; 750 } 751 tsleep_interlock(&p->p_step, 0); 752 spin_unlock(&p->p_spin); 753 } 754 } 755 756