/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags);

int forksleep;			/* Place for fork1() to sleep on. */

/*
 * Red-Black tree support for LWPs
 */

static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
	if (lp1->lwp_tid < lp2->lwp_tid)
		return(-1);
	if (lp1->lwp_tid > lp2->lwp_tid)
		return(1);
	return(0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);

/*
 * Fork system call
 *
 * MPALMOSTSAFE
 */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}

/*
 * MPALMOSTSAFE
 */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}
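
/*
 * Note that fork() and vfork() above are simply fixed flag combinations
 * handed to fork1(): RFFDG | RFPROC for fork(), with RFPPWAIT | RFMEM
 * added for vfork().  rfork() below feeds caller-supplied flags (plus
 * the kernel-internal RFPGLOCK interlock) through the same fork1() path.
 */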

/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new process, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 *
 * MPALMOSTSAFE
 */
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2) {
			PHOLD(p2);
			start_forked_proc(lp, p2);
			uap->sysmsg_fds[0] = p2->p_pid;
			uap->sysmsg_fds[1] = 0;
			PRELE(p2);
		} else {
			uap->sysmsg_fds[0] = 0;
			uap->sysmsg_fds[1] = 0;
		}
	}
	return error;
}

/*
 * MPALMOSTSAFE
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	int error;

	error = copyin(uap->params, &params, sizeof(params));
	if (error)
		goto fail2;

	lwkt_gettoken(&p->p_token);
	plimit_lwp_fork(p);	/* force exclusive access */
	lp = lwp_fork(curthread->td_lwp, p, RFPROC);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid1, sizeof(lp->lwp_tid))))
		goto fail;
	if (params.tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid2, sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();
	lwkt_reltoken(&p->p_token);

	return (0);

fail:
	lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
	--p->p_nthreads;
	/* lwp_dispose expects an exited lwp, and a held proc */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	lp->lwp_thread->td_flags |= TDF_EXITING;
	lwkt_remove_tdallq(lp->lwp_thread);
	PHOLD(p);
	biosched_done(lp->lwp_thread);
	dsched_exit_thread(lp->lwp_thread);
	lwp_dispose(lp);
	lwkt_reltoken(&p->p_token);
fail2:
	return (error);
}

int	nprocs = 1;		/* process 0 */

int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2;
	struct proc *pptr;
	struct pgrp *p1grp;
	struct pgrp *plkgrp;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	lwkt_gettoken(&p1->p_token);
	plkgrp = NULL;
	p2 = NULL;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running
		 */
		if (p1->p_nthreads != 1) {
			error = EINVAL;
			goto done;
		}

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1, fdtmp);
		}

		/*
		 * Unshare file descriptors (from parent.)
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				error = fdcopy(p1, &newfd);
				if (error != 0) {
					error = ENOMEM;
					goto done;
				}
				fdfree(p1, newfd);
			}
		}
		*procp = NULL;
		error = 0;
		goto done;
	}

	/*
	 * Interlock against process group signal delivery.
	 * If signals are pending after the interlock is obtained we have
	 * to restart the system call to process the signals.  If we don't
	 * the child can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	p1grp = p1->p_pgrp;
	if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
		pgref(plkgrp);
		lockmgr(&plkgrp->pg_lock, LK_SHARED);
		if (CURSIG_NOBLOCK(lp1)) {
			error = ERESTART;
			goto done;
		}
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = lp1->lwp_thread->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	atomic_add_int(&nprocs, 1);

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
			(uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count
		 */
		atomic_add_int(&nprocs, -1);
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/*
	 * Allocate a new process, don't get fancy: zero the structure.
	 */
	p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

	/*
	 * Core initialization.  SIDL is a safety state that protects the
	 * partially initialized process once it starts getting hooked
	 * into system structures and becomes addressable.
	 *
	 * We must be sure to acquire p2->p_token as well; we must hold it
	 * once the process is on the allproc list to avoid things such
	 * as competing modifications to p_flags.
	 */
	p2->p_lasttid = -1;	/* first tid will be 0 */
	p2->p_stat = SIDL;

	RB_INIT(&p2->p_lwp_tree);
	spin_init(&p2->p_spin);
	lwkt_token_init(&p2->p_token, "proc");
	lwkt_gettoken(&p2->p_token);

	/*
	 * Setup linkage for kernel based threading XXX lwp.  Also add the
	 * process to the allproclist.
	 *
	 * The process structure is addressable after this point.
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_leader = p2;
	}
	proc_add_allproc(p2);

	/*
	 * Initialize the section which is copied verbatim from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	      ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Duplicate sub-structures as needed.  Increase reference counts
	 * on shared objects.
	 *
	 * NOTE: because we are now on the allproc list it is possible for
	 *	 other consumers to gain temporary references to p2
	 *	 (p2->p_lock can change).
	 */
	if (p1->p_flags & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flags |= P_JAILED;

	if (p2->p_args)
		refcount_acquire(&p2->p_args->ar_ref);

	p2->p_usched = p1->p_usched;
	/* XXX: verify copy of the secondary iosched stuff */
	dsched_new_proc(p2);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		refcount_acquire(&p2->p_sigacts->ps_refcnt);
	} else {
		p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts),
					M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		refcount_init(&p2->p_sigacts->ps_refcnt, 1);
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/* copy namecache handle to the text file */
	if (p1->p_textnch.mount)
		cache_copy(&p1->p_textnch, &p2->p_textnch);

	/*
	 * Handle file descriptors
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		error = fdcopy(p1, &p2->p_fd);
		if (error != 0) {
			error = ENOMEM;
			goto done;
		}
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL) {
			p1->p_fdtol = filedesc_to_leader_alloc(NULL,
							       p1->p_leader);
		}
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flags |= p1->p_flags & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT))
		p2->p_flags |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flags |= P_PPWAIT;

	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	if (p1->p_vkernel)
		vkernel_inherit(p1, p2);

	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	pgref(p1grp);
	lwkt_gettoken(&p1grp->pg_token);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	lwkt_reltoken(&p1grp->pg_token);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INIT(&p2->p_children);

	lwkt_gettoken(&pptr->p_token);
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	lwkt_reltoken(&pptr->p_token);

	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init_mp(&p2->p_ithandle);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above but we still could
	 * have a trace race so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);

	/*
	 * Create the first lwp associated with the new proc.
	 * It will return via a different execution path later, directly
	 * into userland, after it was put on the runq by
	 * start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags);

	if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
					     p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
						p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;

	/*
	 * tell any interested parties about the new process
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	error = 0;
done:
	if (p2)
		lwkt_reltoken(&p2->p_token);
	lwkt_reltoken(&p1->p_token);
	if (plkgrp) {
		lockmgr(&plkgrp->pg_lock, LK_RELEASE);
		pgrel(plkgrp);
	}
	return (error);
}

static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
{
	globaldata_t gd = mycpu;
	struct lwp *lp;
	struct thread *td;

	lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

	lp->lwp_proc = destproc;
	lp->lwp_vmspace = destproc->p_vmspace;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	      (unsigned) ((caddr_t)&lp->lwp_endcopy -
			  (caddr_t)&lp->lwp_startcopy));
	lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;
	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();
	lp->lwp_cpumask &= usched_mastermask;
	lwkt_token_init(&lp->lwp_token, "lwp_token");
	spin_init(&lp->lwp_spin);

	/*
	 * Assign the thread to the current cpu to begin with so we
	 * can manipulate it.
	 */
	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
	lp->lwp_thread = td;
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
#ifdef NO_LWKT_SPLIT_USERPRI
	lwkt_setpri(td, TDPRI_USER_NORM);
#else
	lwkt_setpri(td, TDPRI_KERN_USER);
#endif
	lwkt_set_comm(td, "%s", destproc->p_comm);

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	kqueue_init(&lp->lwp_kqueue, destproc->p_fd);

	/*
	 * Assign a TID to the lp.  Loop until the insert succeeds (returns
	 * NULL).
	 */
	lp->lwp_tid = destproc->p_lasttid;
	do {
		if (++lp->lwp_tid < 0)
			lp->lwp_tid = 1;
	} while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);
	destproc->p_lasttid = lp->lwp_tid;
	destproc->p_nthreads++;

	return (lp);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list,
 * however first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
			function);
	}
#endif
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return(1);
		}
	}
	return (0);
}
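
/*
 * Illustrative usage sketch for the fork callout list above.  The hook
 * name is hypothetical, and this assumes forklist_fn is a void callback
 * taking (parent, child, flags), matching the way fork1() invokes the
 * registered functions:
 *
 *	static void
 *	example_fork_hook(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		... inspect or adjust the child process p2 here ...
 *	}
 *
 *	at_fork(example_fork_hook);	(e.g. at module load)
 *	...
 *	rm_at_fork(example_fork_hook);	(e.g. at module unload)
 */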

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 *
 * p2 is held by the caller.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);

	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exec or exit).
	 *
	 * We must hold our p_token to interlock the flag/tsleep.
	 */
	lwkt_gettoken(&p2->p_token);
	while (p2->p_flags & P_PPWAIT)
		tsleep(lp1->lwp_proc, 0, "ppwait", 0);
	lwkt_reltoken(&p2->p_token);
}