/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmsg.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <vfs/procfs/procfs.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * Dead code: target-address-space reads/writes are done through the
 * equivalent procfs code (procfs_domem()) instead of the direct VM
 * mapping helpers below.  Kept under "#if 0" for reference only.
 */
#if 0
/*
 * Read one int-sized word at 'addr' in procp's address space into *retval.
 * Works by looking up the backing object for the page, mapping that page
 * into kernel_map, wiring it, and bcopy'ing the word out.
 *
 * Returns 0 on success, EINVAL if the lookup fails, or a KERN_* code
 * from the kernel-map operations.
 *
 * NOTE(review): the "vm_object_reference XXX" line is not valid C; this
 * block appears deliberately broken so it cannot be compiled back in
 * without review.
 */
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval)
{
	int rv;
	vm_map_t map, tmap;
	vm_object_t object;
	vm_map_backing_t ba;
	vm_offset_t kva = 0;
	int page_offset;		/* offset into page */
	vm_offset_t pageno;		/* page number */
	vm_map_entry_t out_entry;
	vm_prot_t out_prot;
	int wflags;
	vm_pindex_t pindex;
	vm_pindex_t pcount;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/* tmap because vm_map_lookup() may change its map argument */
	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry,
			   &ba, &pindex, &pcount, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;


	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry, 0);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);

	if (!rv) {
		vm_object_reference XXX (object);

		/* wire the pages */
		rv = vm_map_kernel_wiring(kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		vm_map_remove (kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}

/*
 * Write the int-sized word 'datum' at 'addr' in procp's address space.
 * Mirrors pread() but additionally forces the page writable first
 * (restoring read/execute protection afterwards) and pre-faults it.
 *
 * Returns 0 on success, EINVAL/EFAULT on lookup or protection failure,
 * or a KERN_* code from the kernel-map operations.
 *
 * NOTE(review): dead code under "#if 0", same deliberate breakage as
 * pread() above.
 */
static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum)
{
	int rv;
	vm_map_t map, tmap;
	vm_object_t object;
	vm_map_backing_t ba;
	vm_offset_t kva = 0;
	int page_offset;		/* offset into page */
	vm_offset_t pageno;		/* page number */
	vm_map_entry_t out_entry;
	vm_prot_t out_prot;
	int wflags;
	vm_pindex_t pindex;
	vm_pindex_t pcount;
	boolean_t fix_prot = 0;		/* restore protection on the way out? */

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
				     VM_PROT_WRITE, FALSE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
					  VM_PROT_ALL, 0)) != KERN_SUCCESS)
			return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, wflags, and
	 * single_use aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry,
			   &ba, &pindex, &pcount, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;

	if (rv != KERN_SUCCESS)
		return EINVAL;

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */
	vm_map_lookup_done (tmap, out_entry, 0);

	/*
	 * Fault the page in...
	 */
	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (!rv) {
		vm_object_reference XXX (object);

		/* wire the pages */
		rv = vm_map_kernel_wiring(kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		vm_map_remove (kernel_map, kva, kva + PAGE_SIZE);
	}

	/* Restore the original (read/execute) protection if we relaxed it */
	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
				VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
#endif

/*
 * Process debugging system call.
 *
 * Thin syscall wrapper around kern_ptrace().  For requests whose 'addr'
 * argument points at a register set or PT_IO descriptor, the user data is
 * staged through an on-stack union: copied in before the call for SET/IO
 * requests, and copied back out after a successful call for GET/IO
 * requests.  For all other requests 'addr' is passed through untouched.
 *
 * MPALMOSTSAFE
 */
int
sys_ptrace(struct sysmsg *sysmsg, const struct ptrace_args *uap)
{
	struct proc *p = curproc;

	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
		/* output-only requests: nothing to copy in */
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
#endif
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		/* no staging: hand the raw user pointer to kern_ptrace() */
		addr = uap->addr;
	}
	if (error)
		return (error);

	error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data,
			    &sysmsg->sysmsg_result);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		/*
		 * The transfer itself already succeeded; a failed copyout
		 * of the updated descriptor is deliberately ignored.
		 */
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
#endif
	}

	return (error);
}

/*
 * kern_ptrace() - in-kernel implementation of the ptrace requests.
 *
 * Parameters:
 *	curp	the tracing (current) process
 *	req	PT_* request code
 *	pid	target process id (ignored for PT_TRACE_ME)
 *	addr	request-dependent address/descriptor (kernel pointer for
 *		staged register/PT_IO requests, raw user address otherwise)
 *	data	request-dependent datum (e.g. signal number, write value)
 *	res	out: syscall result value (e.g. the word read by PT_READ_*)
 *
 * Returns 0 or an errno.
 *
 * Locking: the target is held with PHOLD() and its p_token is acquired
 * for the duration of the request; every return path must release both
 * (lwkt_reltoken() then PRELE()).  For the non-self requests the hold
 * comes from pfind().
 */
int
kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr,
	    int data, int *res)
{
	struct proc *p, *pp;
	struct lwp *lp;
	struct iovec iov;
	struct uio uio;
	struct ptrace_io_desc *piod;
	int error = 0;
	int write, tmp;
	int t;

	write = 0;
	if (req == PT_TRACE_ME) {
		p = curp;
		PHOLD(p);
	} else {
		/* pfind() returns the process held */
		if ((p = pfind(pid)) == NULL)
			return ESRCH;
	}
	if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
		PRELE(p);
		return (ESRCH);
	}
	if (p->p_flags & P_SYSTEM) {
		PRELE(p);
		return EINVAL;
	}

	lwkt_gettoken(&p->p_token);
	/* Can't trace a process that's currently exec'ing. */
	if ((p->p_flags & P_INEXEC) != 0) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EAGAIN;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		/* Already traced */
		if (p->p_flags & P_TRACED) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* refuse to attach to one of our own ancestors while traced */
		if (curp->p_flags & P_TRACED)
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr)
				if (pp == p) {
					lwkt_reltoken(&p->p_token);
					PRELE(p);
					return (EINVAL);
				}

		/* not owned by you, has done setuid (unless you're root) */
		if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) ||
		     (p->p_flags & P_SUGID)) {
			if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		/* can't trace init when securelevel > 0 */
		if (securelevel > 0 && p->p_pid == 1) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flags & P_TRACED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* not being traced by YOU */
		if (p->p_pptr != curp) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* not currently stopped */
		if (p->p_stat != SSTOP ||
		    (p->p_flags & P_WAITED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* OK */
		break;

	default:
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

	/* XXX lwp - all requests operate on the process's first lwp only */
	lp = FIRST_LWP_IN_PROC(p);
	if (lp == NULL) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(lp);
#endif

	/*
	 * Actually do the requests
	 */

	*res = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_ATTACH:
		/* security check done above */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		proc_reparent(p, curp);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data >= _SIG_MAXSIG) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		LWPHOLD(lp);

		if (req == PT_STEP) {
			if ((error = ptrace_single_step (lp))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		/* addr == (void *)1 means "continue from current pc" */
		if (addr != (void *)1) {
			if ((error = ptrace_set_pc (lp, (u_long)addr))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}
		LWPRELE(lp);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				pp = pfind(p->p_oppid);
				if (pp) {
					proc_reparent(p, pp);
					PRELE(pp);
				}
			}

			p->p_flags &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		/*
		 * Deliver or queue signal.  If the process is stopped
		 * force it to be SACTIVE again.
		 */
		crit_enter();
		if (p->p_stat == SSTOP) {
			p->p_xstat = data;
			proc_unstop(p, SSTOP);
		} else if (data) {
			ksignal(p, data);
		}
		crit_exit();
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process (though for a SYSSPACE transfer it doesn't
		 * really matter).
		 */
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = curthread;
		error = procfs_domem(curp, lp, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			*res = tmp;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_IO:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process.
		 */
		piod = addr;	/* kernel copy staged by sys_ptrace() */
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = curthread;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (EINVAL);
		}
		error = procfs_domem(curp, lp, NULL, &uio);
		/* report the number of bytes actually transferred */
		piod->piod_len -= uio.uio_resid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_GETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_doregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_GETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dofpregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETDBREGS */
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		/* write = 0 above */
#endif /* PT_GETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
		if (!procfs_validdbregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct dbreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct dbreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dodbregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */

	default:
		break;
	}

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return 0;
}

/*
 * trace_req() - always grants permission for a trace request against p.
 */
int
trace_req(struct proc *p)
{
	return 1;
}

/*
 * stopevent()
 *
 * Stop a process because of a procfs event.  Stay stopped until p->p_step
 * is cleared (cleared by PIOCCONT in procfs).
 *
 * MPSAFE
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{
	/*
	 * Set event info.  Recheck p_stops in case we are
	 * racing a close() on procfs.
	 */
	spin_lock(&p->p_spin);
	if ((p->p_stops & event) == 0) {
		spin_unlock(&p->p_spin);
		return;
	}
	p->p_xstat = val;
	p->p_stype = event;
	p->p_step = 1;
	/* interlock the sleep before dropping p_spin to avoid a lost wakeup */
	tsleep_interlock(&p->p_step, 0);
	spin_unlock(&p->p_spin);

	/*
	 * Wakeup any PIOCWAITing procs and wait for p_step to
	 * be cleared.
	 */
	for (;;) {
		wakeup(&p->p_stype);
		tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
		spin_lock(&p->p_spin);
		if (p->p_step == 0) {
			spin_unlock(&p->p_spin);
			break;
		}
		tsleep_interlock(&p->p_step, 0);
		spin_unlock(&p->p_spin);
	}
}