/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 * $DragonFly: src/sys/kern/kern_fork.c,v 1.67 2007/03/13 00:18:59 corecode Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/vmmeter.h>
#include <sys/thread2.h>
#include <sys/signal2.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

/*
 * These are the structures used to create a callout list of things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags);

int forksleep;	/* Place for fork1() to sleep on. */

/* ARGSUSED */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

/* ARGSUSED */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}

/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new process, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2)
			start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2 ? p2->p_pid : 0;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}
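/*
 * For reference, a summary of the call sites above (not new policy):
 * ignoring the kernel-internal RFPGLOCK interlock flag, fork(2) is
 * equivalent to rfork(RFFDG | RFPROC) -- a new process with a copied
 * descriptor table -- and vfork(2) is equivalent to
 * rfork(RFFDG | RFPROC | RFPPWAIT | RFMEM) -- a shared vmspace, with the
 * parent sleeping until the child execs or exits.  RFPROC with neither
 * RFFDG nor RFCFDG creates a child that shares the parent's descriptor
 * table, as in case (2) of the comment above.
 */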
int
sys_lwp_create(struct lwp_create_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	int error;

	error = copyin(uap->params, &params, sizeof(params));
	if (error)
		goto fail2;

	lp = lwp_fork(curthread->td_lwp, p, RFPROC);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid1, sizeof(lp->lwp_tid))))
		goto fail;
	if (params.tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid2, sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();

	return (0);

fail:
	--p->p_nthreads;
	LIST_REMOVE(lp, lwp_list);
	/* lwp_dispose expects an exited lwp */
	lp->lwp_thread->td_flags = TDF_EXITING;
	lwp_dispose(lp);
fail2:
	return (error);
}

int nprocs = 1;		/* process 0 */

int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2, *pptr;
	struct pgrp *pgrp;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running.
		 */
		if (p1->p_nthreads != 1)
			return (EINVAL);

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				newfd = fdcopy(p1);
				fdfree(p1);
				p1->p_fd = newfd;
			}
		}
		*procp = NULL;
		return (0);
	}

	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't, the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	error = 0;
	pgrp = NULL;
	if ((flags & RFPGLOCK) && (pgrp = p1->p_pgrp) != NULL) {
		lockmgr(&pgrp->pg_lock, LK_SHARED);
		if (CURSIGNB(lp1)) {
			error = ERESTART;
			goto done;
		}
	}
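	/*
	 * Note: returning ERESTART above causes the syscall layer to
	 * re-issue the fork after pending signals have been processed,
	 * so the fork is retried rather than failed.
	 */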
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = p1->p_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
			    "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(p1->p_ucred->cr_ruidinfo, 1,
	    (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count.
		 */
		nprocs--;
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
			    "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/* Allocate new proc. */
	p2 = zalloc(proc_zone);
	bzero(p2, sizeof(*p2));

	/*
	 * Set up linkage for kernel-based threading.  XXX lwp
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_leader = p2;
	}

	LIST_INIT(&p2->p_lwps);

	/*
	 * Setting the state to SIDL protects the partially initialized
	 * process once it starts getting hooked into the rest of the system.
	 */
	p2->p_stat = SIDL;
	proc_add_allproc(p2);

	/*
	 * Make a proc table entry for the new process.
	 * The whole structure was zeroed above, so copy the section that is
	 * copied directly from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(p1->p_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flag |= P_JAILED;

	if (p2->p_args)
		p2->p_args->ar_ref++;

	p2->p_usched = p1->p_usched;

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		p2->p_sigacts->ps_refcnt++;
	} else {
		p2->p_sigacts = (struct sigacts *)kmalloc(sizeof(*p2->p_sigacts),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		p2->p_sigacts->ps_refcnt = 1;
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);
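	/*
	 * Descriptor inheritance, summarized: RFCFDG gives the child a
	 * fresh, empty descriptor table, RFFDG gives it a private copy of
	 * the parent's table (the fork(2) semantics), and with neither
	 * flag the table object itself is shared, which is why the
	 * filedesc_to_leader tracking below is only needed in the shared
	 * case.
	 */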
	/*
	 * Handle file descriptors.
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		p2->p_fd = fdcopy(p1);
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol =
			    filedesc_to_leader_alloc(NULL, p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, but
			 * different process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1->p_limit);

	/*
	 * Preserve some more flags in the subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	if (p1->p_vkernel)
		vkernel_inherit(p1, p2);

	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	LIST_INSERT_AFTER(p1, p2, p_pglist);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_children);
	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init(&p2->p_ithandle);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above, but we could still have a trace race
	 * so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);

	/*
	 * Create the first lwp associated with the new proc.
	 * It will return via a different execution path later, directly
	 * into userland, after it was put on the runq by
	 * start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags);

	if (flags == (RFFDG | RFPROC)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 * What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
done:
	if (pgrp)
		lockmgr(&pgrp->pg_lock, LK_RELEASE);
	return (error);
}
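/*
 * Create a new lwp in destproc, copying scheduler and altstack state from
 * origlp.  The new lwp is assigned a fresh tid and a dedicated LWKT
 * thread, and cpu_fork() prepares its pcb and kernel stack; the caller is
 * responsible for actually scheduling it.
 */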
static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
{
	struct lwp *lp;
	struct thread *td;
	lwpid_t tid;

	/*
	 * We need to prevent wrap-around collisions.
	 * Until we have a nice tid allocator, we need to
	 * start searching for free tids once we wrap around.
	 *
	 * XXX give me a nicer allocator
	 */
	if (destproc->p_lasttid + 1 <= 0) {
		tid = 0;
restart:
		FOREACH_LWP_IN_PROC(lp, destproc) {
			if (lp->lwp_tid != tid)
				continue;
			/* tids match, search next. */
			tid++;
			/*
			 * Wait -- the whole tid space is depleted?
			 * Impossible.
			 */
			if (tid <= 0)
				panic("lwp_fork: All tids depleted?!");
			goto restart;
		}
		/* When we come here, the tid is not occupied */
	} else {
		tid = destproc->p_lasttid++;
	}

	lp = zalloc(lwp_zone);
	bzero(lp, sizeof(*lp));
	lp->lwp_proc = destproc;
	lp->lwp_tid = tid;
	LIST_INSERT_HEAD(&destproc->p_lwps, lp, lwp_list);
	destproc->p_nthreads++;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	    (unsigned) ((caddr_t)&lp->lwp_endcopy -
	    (caddr_t)&lp->lwp_startcopy));
	lp->lwp_flag |= origlp->lwp_flag & LWP_ALTSTACK;

	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = mycpu->gd_schedclock.time -
	    mycpu->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();

	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1, 0);
	lp->lwp_thread = td;
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
#ifdef SMP
	KKASSERT(td->td_mpcount == 1);
#endif
	lwkt_setpri(td, TDPRI_KERN_USER);
	lwkt_set_comm(td, "%s", destproc->p_comm);

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	caps_fork(origlp->lwp_thread, lp->lwp_thread);

	return (lp);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * however, first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
		    function);
	}
#endif
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}
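/*
 * Example usage (my_fork_callout is an illustrative name, not part of
 * this file; forklist_fn is assumed to return void, matching the call
 * site in fork1()):
 *
 *	static void
 *	my_fork_callout(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		... per-fork bookkeeping for the new process p2 ...
 *	}
 *
 *	at_fork(my_fork_callout);	-- e.g. from a module load handler
 *	rm_at_fork(my_fork_callout);	-- and from the unload handler
 */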
/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return (1);
		}
	}
	return (0);
}

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);

	/*
	 * Move from SIDL to the RUN queue and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child and sleep on our
	 * proc (in case of exit).
	 */
	while (p2->p_flag & P_PPWAIT)
		tsleep(lp1->lwp_proc, 0, "ppwait", 0);
}