1 /* 2 * Copyright (c) 1994, Sean Eric Fagan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Sean Eric Fagan. 16 * 4. The name of the author may not be used to endorse or promote products 17 * derived from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
 *
 * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $
 * $DragonFly: src/sys/kern/sys_process.c,v 1.30 2007/02/19 01:14:23 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/user.h>
#include <vfs/procfs/procfs.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/*
 * use the equivalent procfs code
 *
 * NOTE(review): the pread()/pwrite() helpers below are dead code, retained
 * only for reference — the live PT_READ_*/PT_WRITE_* paths go through
 * procfs_domem() instead (see kern_ptrace() below).
 */
#if 0
/*
 * Read one word from 'addr' in the target process 'procp' into *retval,
 * by looking the page up in the target vmspace and temporarily mapping
 * it into kernel_map.
 *
 * Returns 0 on success, EINVAL if the address does not resolve, or a
 * kernel vm error code from vm_map_find()/vm_map_wire().
 */
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval) {
	int rv;
	vm_map_t map, tmap;
	vm_object_t object;
	vm_offset_t kva = 0;
	int page_offset;		/* offset into page */
	vm_offset_t pageno;		/* page number */
	vm_map_entry_t out_entry;
	vm_prot_t out_prot;
	boolean_t wired;
	vm_pindex_t pindex;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Resolve the backing object/pindex.  tmap is used because
	 * vm_map_lookup() can change the map argument.
	 */
	tmap = map;
	rv = vm_map_lookup (&tmap, pageno, VM_PROT_READ, &out_entry,
			    &object, &pindex, &out_prot, &wired);

	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry, 0);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, IDX_TO_OFF(pindex),
			  &kva,
			  PAGE_SIZE, PAGE_SIZE,
			  0, VM_MAPTYPE_NORMAL,
			  VM_PROT_ALL, VM_PROT_ALL,
			  0);

	if (!rv) {
		vm_object_reference (object);

		/* Wire the page so the bcopy cannot fault. */
		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		/* Tear down the temporary kernel mapping in all cases. */
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}

/*
 * Write the word 'datum' to 'addr' in the target process 'procp',
 * temporarily upgrading the page protection to writable if needed and
 * restoring read/execute afterwards.
 *
 * Returns 0 on success, EINVAL/EFAULT on lookup or fault failure, or a
 * kernel vm error code from vm_map_find()/vm_map_wire().
 */
static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
	int rv;
	vm_map_t map, tmap;
	vm_object_t object;
	vm_offset_t kva = 0;
	int page_offset;		/* offset into page */
	vm_offset_t pageno;		/* page number */
	vm_map_entry_t out_entry;
	vm_prot_t out_prot;
	boolean_t wired;
	vm_pindex_t pindex;
	boolean_t fix_prot = 0;		/* protection was upgraded, restore it */

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
				     VM_PROT_WRITE, FALSE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
					  VM_PROT_ALL, 0)) != KERN_SUCCESS)
			return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, wired, and
	 * single_use aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */

	tmap = map;
	rv = vm_map_lookup (&tmap, pageno, VM_PROT_WRITE, &out_entry,
			    &object, &pindex, &out_prot, &wired);
	if (rv != KERN_SUCCESS) {
		return EINVAL;
	}

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */

	vm_map_lookup_done (tmap, out_entry, 0);

	/*
	 * Fault the page in...
	 */

	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, IDX_TO_OFF(pindex),
			  &kva,
			  PAGE_SIZE, PAGE_SIZE,
			  0, VM_MAPTYPE_NORMAL,
			  VM_PROT_ALL, VM_PROT_ALL,
			  0);
	if (!rv) {
		vm_object_reference (object);

		/* Wire the page so the bcopy cannot fault. */
		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		/* Tear down the temporary kernel mapping in all cases. */
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
#endif

/*
 * Process debugging system call.
 *
 * Syscall front-end for kern_ptrace(): copies in the register-set or
 * I/O-descriptor argument for requests that take one, performs the
 * operation, then copies results back out to uap->addr.  For requests
 * not listed in the switch, uap->addr is passed through untranslated.
 *
 * MPALMOSTSAFE
 */
int
sys_ptrace(struct ptrace_args *uap)
{
	struct proc *p = curproc;

	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
		/* Output-only requests: nothing to copy in, r is filled below. */
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
#endif
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
	}
	if (error)
		return (error);

	error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data,
			&uap->sysmsg_result);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		/*
		 * Copy the (possibly updated) piod back out even on partial
		 * transfers; the copyout error is deliberately ignored here.
		 */
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
#endif
	}

	return (error);
}

/*
 * Back-end for ptrace(2).  'curp' is the tracing (calling) process, 'pid'
 * the target, 'addr'/'data' the request-specific arguments (addr points at
 * kernel copies of register sets / piod for those requests — see
 * sys_ptrace()), and '*res' receives the syscall result value (the word
 * read, for PT_READ_*).
 *
 * Validates permissions for the request, then performs it.  Returns 0 or
 * an errno.  proc_token is held for the duration; every return path must
 * release it.
 */
int
kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr, int data, int *res)
{
	struct proc *p, *pp;
	struct lwp *lp;
	struct iovec iov;
	struct uio uio;
	struct ptrace_io_desc *piod;
	int error = 0;
	int write, tmp;
	int t;

	lwkt_gettoken(&proc_token);

	write = 0;
	if (req == PT_TRACE_ME) {
		p = curp;
	} else {
		if ((p = pfind(pid)) == NULL) {
			lwkt_reltoken(&proc_token);
			return ESRCH;
		}
	}
	/* Target must be visible from the caller's jail. */
	if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
		lwkt_reltoken(&proc_token);
		return (ESRCH);
	}

	/* Can't trace a process that's currently exec'ing. */
	if ((p->p_flag & P_INEXEC) != 0) {
		lwkt_reltoken(&proc_token);
		return EAGAIN;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid) {
			lwkt_reltoken(&proc_token);
			return EINVAL;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			lwkt_reltoken(&proc_token);
			return EBUSY;
		}

		/*
		 * Refuse to attach to one of our own ancestors if we are
		 * ourselves being traced (would create a tracing cycle).
		 */
		if (curp->p_flag & P_TRACED)
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr)
				if (pp == p) {
					lwkt_reltoken(&proc_token);
					return (EINVAL);
				}

		/* not owned by you, has done setuid (unless you're root) */
		if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) ||
		     (p->p_flag & P_SUGID)) {
			if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) {
				lwkt_reltoken(&proc_token);
				return error;
			}
		}

		/* can't trace init when securelevel > 0 */
		if (securelevel > 0 && p->p_pid == 1) {
			lwkt_reltoken(&proc_token);
			return EPERM;
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			lwkt_reltoken(&proc_token);
			return EPERM;
		}

		/* not being traced by YOU */
		if (p->p_pptr != curp) {
			lwkt_reltoken(&proc_token);
			return EBUSY;
		}

		/* not currently stopped */
		if (p->p_stat != SSTOP ||
		    (p->p_flag & P_WAITED) == 0) {
			lwkt_reltoken(&proc_token);
			return EBUSY;
		}

		/* OK */
		break;

	default:
		lwkt_reltoken(&proc_token);
		return EINVAL;
	}

	/* XXX lwp — operates on the process's first lwp only */
	lp = FIRST_LWP_IN_PROC(p);
#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(lp);
#endif

	/*
	 * Actually do the requests
	 */

	*res = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		lwkt_reltoken(&proc_token);
		return 0;

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		/* remember the original parent so PT_DETACH can restore it */
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != curp)
			proc_reparent(p, curp);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			lwkt_reltoken(&proc_token);
			return EINVAL;
		}

		LWPHOLD(lp);

		if (req == PT_STEP) {
			if ((error = ptrace_single_step (lp))) {
				LWPRELE(lp);
				lwkt_reltoken(&proc_token);
				return error;
			}
		}

		/* addr == (void *)1 means "continue from the current pc" */
		if (addr != (void *)1) {
			if ((error = ptrace_set_pc (lp,
			    (u_long)(uintfptr_t)addr))) {
				LWPRELE(lp);
				lwkt_reltoken(&proc_token);
				return error;
			}
		}
		LWPRELE(lp);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				/* original parent may be gone; fall back to init */
				pp = pfind(p->p_oppid);
				proc_reparent(p, pp ? pp : initproc);
			}

			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		/*
		 * Deliver or queue signal.  If the process is stopped
		 * force it to be SACTIVE again.
		 */
		crit_enter();
		if (p->p_stat == SSTOP) {
			p->p_xstat = data;
			lp->lwp_flag |= LWP_BREAKTSLEEP;
			proc_unstop(p);
		} else if (data) {
			ksignal(p, data);
		}
		crit_exit();
		lwkt_reltoken(&proc_token);
		return 0;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process (though for a SYSSPACE transfer it doesn't
		 * really matter).
		 */
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = curthread;
		error = procfs_domem(curp, lp, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			*res = tmp;	/* return the word that was read */
		lwkt_reltoken(&proc_token);
		return (error);

	case PT_IO:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process.
		 */
		piod = addr;	/* kernel copy made by sys_ptrace() */
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = curthread;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			lwkt_reltoken(&proc_token);
			return (EINVAL);
		}
		error = procfs_domem(curp, lp, NULL, &uio);
		/* report the number of bytes actually transferred */
		piod->piod_len -= uio.uio_resid;
		lwkt_reltoken(&proc_token);
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_SETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(lp)) {	/* no P_SYSTEM procs please */
			lwkt_reltoken(&proc_token);
			return EINVAL;
		} else {
			/* addr is a kernel-space struct reg (see sys_ptrace) */
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_doregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&proc_token);
			return t;
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_SETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(lp)) {	/* no P_SYSTEM procs please */
			lwkt_reltoken(&proc_token);
			return EINVAL;
		} else {
			/* addr is a kernel-space struct fpreg (see sys_ptrace) */
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dofpregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&proc_token);
			return t;
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETDBREGS */
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		/* write = 0 above */
#endif /* PT_SETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
		if (!procfs_validdbregs(lp)) {	/* no P_SYSTEM procs please */
			lwkt_reltoken(&proc_token);
			return EINVAL;
		} else {
			/* addr is a kernel-space struct dbreg (see sys_ptrace) */
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct dbreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct dbreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dodbregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&proc_token);
			return t;
		}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */

	default:
		break;
	}

	lwkt_reltoken(&proc_token);
	return 0;
}

/*
 * trace_req() - always grants permission; tracing requests are
 * fully validated in kern_ptrace() above.
 */
int
trace_req(struct proc *p)
{
	return 1;
}

/*
 * stopevent()
 *
 * Stop a process because of a procfs event.  Stay stopped until p->p_step
 * is cleared (cleared by PIOCCONT in procfs).
 *
 * MPSAFE
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{
	/*
	 * Set event info.  Recheck p_stops in case we are
	 * racing a close() on procfs.
	 */
	spin_lock(&p->p_spin);
	if ((p->p_stops & event) == 0) {
		spin_unlock(&p->p_spin);
		return;
	}
	p->p_xstat = val;
	p->p_stype = event;
	p->p_step = 1;
	/* interlock before dropping the spinlock so no wakeup is lost */
	tsleep_interlock(&p->p_step, 0);
	spin_unlock(&p->p_spin);

	/*
	 * Wakeup any PIOCWAITing procs and wait for p_step to
	 * be cleared.
	 */
	for (;;) {
		wakeup(&p->p_stype);
		tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
		spin_lock(&p->p_spin);
		if (p->p_step == 0) {
			spin_unlock(&p->p_spin);
			break;
		}
		tsleep_interlock(&p->p_step, 0);
		spin_unlock(&p->p_spin);
	}
}