/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
static MALLOC_DEFINE(M_REAPER, "reaper", "process reapers");

/*
 * These are the structures used to create a callout list of things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags);

int	forksleep;	/* Place for fork1() to sleep on. */
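
/*
 * NOTE: fork1() tsleep()s on &forksleep for up to hz/2 ticks when the
 *	 global or per-uid process limits are hit (see the limit checks
 *	 in fork1() below), pacing retries of a failing fork.
 */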

/*
 * Red-Black tree support for LWPs
 */

static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
	if (lp1->lwp_tid < lp2->lwp_tid)
		return(-1);
	if (lp1->lwp_tid > lp2->lwp_tid)
		return(1);
	return(0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);

/*
 * fork() system call
 */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}

/*
 * vfork() system call
 */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}

/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new one, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2) {
			PHOLD(p2);
			start_forked_proc(lp, p2);
			uap->sysmsg_fds[0] = p2->p_pid;
			uap->sysmsg_fds[1] = 0;
			PRELE(p2);
		} else {
			uap->sysmsg_fds[0] = 0;
			uap->sysmsg_fds[1] = 0;
		}
	}
	return error;
}
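
/*
 * Illustrative examples of the three rfork() cases above, as a caller
 * might issue them from userland (flag values from <sys/unistd.h>):
 *
 *	rfork(RFPROC | RFFDG);	like fork(): new process with a copy of
 *				the parent's descriptor table
 *	rfork(RFPROC | RFMEM);	new process sharing the parent's
 *				address space
 *	rfork(RFCFDG);		no new process; the current process gets
 *				a fresh, empty descriptor table
 */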

/*
 * Low level thread create used by pthreads.
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	int error;

	error = copyin(uap->params, &params, sizeof(params));
	if (error)
		goto fail2;

	lwkt_gettoken(&p->p_token);
	plimit_lwp_fork(p);	/* force exclusive access */
	lp = lwp_fork(curthread->td_lwp, p, RFPROC | RFMEM);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.lwp_tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid1, sizeof(lp->lwp_tid))))
		goto fail;
	if (params.lwp_tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid2, sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();
	lwkt_reltoken(&p->p_token);

	return (0);

fail:
	/*
	 * Make sure no one is using this lwp before it is removed from
	 * the tree.  If we didn't wait here, lwp tree iteration with
	 * blocking operations could break.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpfail", 1);
	lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
	--p->p_nthreads;
	/* lwp_dispose expects an exited lwp, and a held proc */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	lp->lwp_thread->td_flags |= TDF_EXITING;
	lwkt_remove_tdallq(lp->lwp_thread);
	PHOLD(p);
	biosched_done(lp->lwp_thread);
	dsched_exit_thread(lp->lwp_thread);
	lwp_dispose(lp);
	lwkt_reltoken(&p->p_token);
fail2:
	return (error);
}

int	nprocs = 1;		/* process 0 */

int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2;
	struct proc *pptr;
	struct pgrp *p1grp;
	struct pgrp *plkgrp;
	struct sysreaper *reap;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	lwkt_gettoken(&p1->p_token);
	plkgrp = NULL;
	p2 = NULL;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running
		 */
		if (p1->p_nthreads != 1) {
			error = EINVAL;
			goto done;
		}

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1, fdtmp);
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				error = fdcopy(p1, &newfd);
				if (error != 0) {
					error = ENOMEM;
					goto done;
				}
				fdfree(p1, newfd);
			}
		}
		*procp = NULL;
		error = 0;
		goto done;
	}

	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't, the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	p1grp = p1->p_pgrp;
	if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
		pgref(plkgrp);
		lockmgr(&plkgrp->pg_lock, LK_SHARED);
		if (CURSIG_NOBLOCK(lp1)) {
			error = ERESTART;
			goto done;
		}
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = lp1->lwp_thread->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	atomic_add_int(&nprocs, 1);

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
			(uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count
		 */
		atomic_add_int(&nprocs, -1);
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
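
	/*
	 * Worked example of the limit checks above (numbers illustrative):
	 * with maxproc = 1000, forks by a non-root uid begin failing with
	 * EAGAIN once nprocs reaches 990, since the last ten slots are
	 * reserved for root; root itself fails only at nprocs >= 1000.
	 * chgproccnt() then enforces the per-uid RLIMIT_NPROC limit on
	 * top of the global check.
	 */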
355 */ 356 atomic_add_int(&nprocs, 1); 357 358 /* 359 * Increment the count of procs running with this uid. Don't allow 360 * a nonprivileged user to exceed their current limit. 361 */ 362 ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1, 363 (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0); 364 if (!ok) { 365 /* 366 * Back out the process count 367 */ 368 atomic_add_int(&nprocs, -1); 369 if (ppsratecheck(&lastfail, &curfail, 1)) 370 kprintf("maxproc limit exceeded by uid %d, please " 371 "see tuning(7) and login.conf(5).\n", uid); 372 tsleep(&forksleep, 0, "fork", hz / 2); 373 error = EAGAIN; 374 goto done; 375 } 376 377 /* 378 * Allocate a new process, don't get fancy: zero the structure. 379 */ 380 p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO); 381 382 /* 383 * Core initialization. SIDL is a safety state that protects the 384 * partially initialized process once it starts getting hooked 385 * into system structures and becomes addressable. 386 * 387 * We must be sure to acquire p2->p_token as well, we must hold it 388 * once the process is on the allproc list to avoid things such 389 * as competing modifications to p_flags. 390 */ 391 mycpu->gd_forkid += ncpus; 392 p2->p_forkid = mycpu->gd_forkid + mycpu->gd_cpuid; 393 p2->p_lasttid = -1; /* first tid will be 0 */ 394 p2->p_stat = SIDL; 395 396 /* 397 * NOTE: Process 0 will not have a reaper, but process 1 (init) and 398 * all other processes always will. 399 */ 400 if ((reap = p1->p_reaper) != NULL) { 401 reaper_hold(reap); 402 p2->p_reaper = reap; 403 } else { 404 p2->p_reaper = NULL; 405 } 406 407 RB_INIT(&p2->p_lwp_tree); 408 spin_init(&p2->p_spin, "procfork1"); 409 lwkt_token_init(&p2->p_token, "proc"); 410 lwkt_gettoken(&p2->p_token); 411 412 /* 413 * Setup linkage for kernel based threading XXX lwp. Also add the 414 * process to the allproclist. 415 * 416 * The process structure is addressable after this point. 417 */ 418 if (flags & RFTHREAD) { 419 p2->p_peers = p1->p_peers; 420 p1->p_peers = p2; 421 p2->p_leader = p1->p_leader; 422 } else { 423 p2->p_leader = p2; 424 } 425 proc_add_allproc(p2); 426 427 /* 428 * Initialize the section which is copied verbatim from the parent. 429 */ 430 bcopy(&p1->p_startcopy, &p2->p_startcopy, 431 ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy)); 432 433 /* 434 * Duplicate sub-structures as needed. Increase reference counts 435 * on shared objects. 436 * 437 * NOTE: because we are now on the allproc list it is possible for 438 * other consumers to gain temporary references to p2 439 * (p2->p_lock can change). 
440 */ 441 if (p1->p_flags & P_PROFIL) 442 startprofclock(p2); 443 p2->p_ucred = crhold(lp1->lwp_thread->td_ucred); 444 445 if (jailed(p2->p_ucred)) 446 p2->p_flags |= P_JAILED; 447 448 if (p2->p_args) 449 refcount_acquire(&p2->p_args->ar_ref); 450 451 p2->p_usched = p1->p_usched; 452 /* XXX: verify copy of the secondary iosched stuff */ 453 dsched_enter_proc(p2); 454 455 if (flags & RFSIGSHARE) { 456 p2->p_sigacts = p1->p_sigacts; 457 refcount_acquire(&p2->p_sigacts->ps_refcnt); 458 } else { 459 p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts), 460 M_SUBPROC, M_WAITOK); 461 bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts)); 462 refcount_init(&p2->p_sigacts->ps_refcnt, 1); 463 } 464 if (flags & RFLINUXTHPN) 465 p2->p_sigparent = SIGUSR1; 466 else 467 p2->p_sigparent = SIGCHLD; 468 469 /* bump references to the text vnode (for procfs) */ 470 p2->p_textvp = p1->p_textvp; 471 if (p2->p_textvp) 472 vref(p2->p_textvp); 473 474 /* copy namecache handle to the text file */ 475 if (p1->p_textnch.mount) 476 cache_copy(&p1->p_textnch, &p2->p_textnch); 477 478 /* 479 * Handle file descriptors 480 */ 481 if (flags & RFCFDG) { 482 p2->p_fd = fdinit(p1); 483 fdtol = NULL; 484 } else if (flags & RFFDG) { 485 error = fdcopy(p1, &p2->p_fd); 486 if (error != 0) { 487 error = ENOMEM; 488 goto done; 489 } 490 fdtol = NULL; 491 } else { 492 p2->p_fd = fdshare(p1); 493 if (p1->p_fdtol == NULL) { 494 p1->p_fdtol = filedesc_to_leader_alloc(NULL, 495 p1->p_leader); 496 } 497 if ((flags & RFTHREAD) != 0) { 498 /* 499 * Shared file descriptor table and 500 * shared process leaders. 501 */ 502 fdtol = p1->p_fdtol; 503 fdtol->fdl_refcount++; 504 } else { 505 /* 506 * Shared file descriptor table, and 507 * different process leaders 508 */ 509 fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2); 510 } 511 } 512 p2->p_fdtol = fdtol; 513 p2->p_limit = plimit_fork(p1); 514 515 /* 516 * Preserve some more flags in subprocess. P_PROFIL has already 517 * been preserved. 518 */ 519 p2->p_flags |= p1->p_flags & P_SUGID; 520 if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT)) 521 p2->p_flags |= P_CONTROLT; 522 if (flags & RFPPWAIT) { 523 p2->p_flags |= P_PPWAIT; 524 if (p1->p_upmap) 525 atomic_add_int(&p1->p_upmap->invfork, 1); 526 } 527 528 /* 529 * Inherit the virtual kernel structure (allows a virtual kernel 530 * to fork to simulate multiple cpus). 531 */ 532 if (p1->p_vkernel) 533 vkernel_inherit(p1, p2); 534 535 /* 536 * Once we are on a pglist we may receive signals. XXX we might 537 * race a ^C being sent to the process group by not receiving it 538 * at all prior to this line. 539 */ 540 pgref(p1grp); 541 lwkt_gettoken(&p1grp->pg_token); 542 LIST_INSERT_AFTER(p1, p2, p_pglist); 543 lwkt_reltoken(&p1grp->pg_token); 544 545 /* 546 * Attach the new process to its parent. 547 * 548 * If RFNOWAIT is set, the newly created process becomes a child 549 * of the reaper (typically init). This effectively disassociates 550 * the child from the parent. 551 * 552 * Temporarily hold pptr for the RFNOWAIT case to avoid ripouts. 
553 */ 554 if (flags & RFNOWAIT) { 555 pptr = reaper_get(reap); 556 if (pptr == NULL) { 557 pptr = initproc; 558 PHOLD(pptr); 559 } 560 } else { 561 pptr = p1; 562 } 563 p2->p_pptr = pptr; 564 LIST_INIT(&p2->p_children); 565 566 lwkt_gettoken(&pptr->p_token); 567 LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling); 568 lwkt_reltoken(&pptr->p_token); 569 570 if (flags & RFNOWAIT) 571 PRELE(pptr); 572 573 varsymset_init(&p2->p_varsymset, &p1->p_varsymset); 574 callout_init_mp(&p2->p_ithandle); 575 576 #ifdef KTRACE 577 /* 578 * Copy traceflag and tracefile if enabled. If not inherited, 579 * these were zeroed above but we still could have a trace race 580 * so make sure p2's p_tracenode is NULL. 581 */ 582 if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) { 583 p2->p_traceflag = p1->p_traceflag; 584 p2->p_tracenode = ktrinherit(p1->p_tracenode); 585 } 586 #endif 587 588 /* 589 * This begins the section where we must prevent the parent 590 * from being swapped. 591 * 592 * Gets PRELE'd in the caller in start_forked_proc(). 593 */ 594 PHOLD(p1); 595 596 vm_fork(p1, p2, flags); 597 598 /* 599 * Create the first lwp associated with the new proc. 600 * It will return via a different execution path later, directly 601 * into userland, after it was put on the runq by 602 * start_forked_proc(). 603 */ 604 lwp_fork(lp1, p2, flags); 605 606 if (flags == (RFFDG | RFPROC | RFPGLOCK)) { 607 mycpu->gd_cnt.v_forks++; 608 mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize + 609 p2->p_vmspace->vm_ssize; 610 } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) { 611 mycpu->gd_cnt.v_vforks++; 612 mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize + 613 p2->p_vmspace->vm_ssize; 614 } else if (p1 == &proc0) { 615 mycpu->gd_cnt.v_kthreads++; 616 mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize + 617 p2->p_vmspace->vm_ssize; 618 } else { 619 mycpu->gd_cnt.v_rforks++; 620 mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize + 621 p2->p_vmspace->vm_ssize; 622 } 623 624 /* 625 * Both processes are set up, now check if any loadable modules want 626 * to adjust anything. 627 * What if they have an error? XXX 628 */ 629 TAILQ_FOREACH(ep, &fork_list, next) { 630 (*ep->function)(p1, p2, flags); 631 } 632 633 /* 634 * Set the start time. Note that the process is not runnable. The 635 * caller is responsible for making it runnable. 636 */ 637 microtime(&p2->p_start); 638 p2->p_acflag = AFORK; 639 640 /* 641 * tell any interested parties about the new process 642 */ 643 KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid); 644 645 /* 646 * Return child proc pointer to parent. 647 */ 648 *procp = p2; 649 error = 0; 650 done: 651 if (p2) 652 lwkt_reltoken(&p2->p_token); 653 lwkt_reltoken(&p1->p_token); 654 if (plkgrp) { 655 lockmgr(&plkgrp->pg_lock, LK_RELEASE); 656 pgrel(plkgrp); 657 } 658 return (error); 659 } 660 661 static struct lwp * 662 lwp_fork(struct lwp *origlp, struct proc *destproc, int flags) 663 { 664 globaldata_t gd = mycpu; 665 struct lwp *lp; 666 struct thread *td; 667 668 lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO); 669 670 lp->lwp_proc = destproc; 671 lp->lwp_vmspace = destproc->p_vmspace; 672 lp->lwp_stat = LSRUN; 673 bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy, 674 (unsigned) ((caddr_t)&lp->lwp_endcopy - 675 (caddr_t)&lp->lwp_startcopy)); 676 677 /* 678 * Reset the sigaltstack if memory is shared, otherwise inherit 679 * it. 
680 */ 681 if (flags & RFMEM) { 682 lp->lwp_sigstk.ss_flags = SS_DISABLE; 683 lp->lwp_sigstk.ss_size = 0; 684 lp->lwp_sigstk.ss_sp = NULL; 685 lp->lwp_flags &= ~LWP_ALTSTACK; 686 } else { 687 lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK; 688 } 689 690 /* 691 * Set cpbase to the last timeout that occured (not the upcoming 692 * timeout). 693 * 694 * A critical section is required since a timer IPI can update 695 * scheduler specific data. 696 */ 697 crit_enter(); 698 lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic; 699 destproc->p_usched->heuristic_forking(origlp, lp); 700 crit_exit(); 701 CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask); 702 lwkt_token_init(&lp->lwp_token, "lwp_token"); 703 spin_init(&lp->lwp_spin, "lwptoken"); 704 705 /* 706 * Assign the thread to the current cpu to begin with so we 707 * can manipulate it. 708 */ 709 td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0); 710 lp->lwp_thread = td; 711 td->td_ucred = crhold(destproc->p_ucred); 712 td->td_proc = destproc; 713 td->td_lwp = lp; 714 td->td_switch = cpu_heavy_switch; 715 #ifdef NO_LWKT_SPLIT_USERPRI 716 lwkt_setpri(td, TDPRI_USER_NORM); 717 #else 718 lwkt_setpri(td, TDPRI_KERN_USER); 719 #endif 720 lwkt_set_comm(td, "%s", destproc->p_comm); 721 722 /* 723 * cpu_fork will copy and update the pcb, set up the kernel stack, 724 * and make the child ready to run. 725 */ 726 cpu_fork(origlp, lp, flags); 727 kqueue_init(&lp->lwp_kqueue, destproc->p_fd); 728 729 /* 730 * Assign a TID to the lp. Loop until the insert succeeds (returns 731 * NULL). 732 * 733 * If we are in a vfork assign the same TID as the lwp that did the 734 * vfork(). This way if the user program messes around with 735 * pthread calls inside the vfork(), it will operate like an 736 * extension of the (blocked) parent. Also note that since the 737 * address space is being shared, insofar as pthreads is concerned, 738 * the code running in the vfork() is part of the original process. 739 */ 740 if (flags & RFPPWAIT) { 741 lp->lwp_tid = origlp->lwp_tid - 1; 742 } else { 743 lp->lwp_tid = destproc->p_lasttid; 744 } 745 746 do { 747 if (++lp->lwp_tid < 0) 748 lp->lwp_tid = 1; 749 } while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL); 750 751 destproc->p_lasttid = lp->lwp_tid; 752 destproc->p_nthreads++; 753 754 /* 755 * This flag is set and never cleared. It means that the process 756 * was threaded at some point. Used to improve exit performance. 757 */ 758 destproc->p_flags |= P_MAYBETHREADED; 759 760 return (lp); 761 } 762 763 /* 764 * The next two functionms are general routines to handle adding/deleting 765 * items on the fork callout list. 766 * 767 * at_fork(): 768 * Take the arguments given and put them onto the fork callout list, 769 * However first make sure that it's not already there. 770 * Returns 0 on success or a standard error number. 771 */ 772 int 773 at_fork(forklist_fn function) 774 { 775 struct forklist *ep; 776 777 #ifdef INVARIANTS 778 /* let the programmer know if he's been stupid */ 779 if (rm_at_fork(function)) { 780 kprintf("WARNING: fork callout entry (%p) already present\n", 781 function); 782 } 783 #endif 784 ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO); 785 ep->function = function; 786 TAILQ_INSERT_TAIL(&fork_list, ep, next); 787 return (0); 788 } 789 790 /* 791 * Scan the exit callout list for the given item and remove it.. 
792 * Returns the number of items removed (0 or 1) 793 */ 794 int 795 rm_at_fork(forklist_fn function) 796 { 797 struct forklist *ep; 798 799 TAILQ_FOREACH(ep, &fork_list, next) { 800 if (ep->function == function) { 801 TAILQ_REMOVE(&fork_list, ep, next); 802 kfree(ep, M_ATFORK); 803 return(1); 804 } 805 } 806 return (0); 807 } 808 809 /* 810 * Add a forked process to the run queue after any remaining setup, such 811 * as setting the fork handler, has been completed. 812 * 813 * p2 is held by the caller. 814 */ 815 void 816 start_forked_proc(struct lwp *lp1, struct proc *p2) 817 { 818 struct lwp *lp2 = ONLY_LWP_IN_PROC(p2); 819 int pflags; 820 821 /* 822 * Move from SIDL to RUN queue, and activate the process's thread. 823 * Activation of the thread effectively makes the process "a" 824 * current process, so we do not setrunqueue(). 825 * 826 * YYY setrunqueue works here but we should clean up the trampoline 827 * code so we just schedule the LWKT thread and let the trampoline 828 * deal with the userland scheduler on return to userland. 829 */ 830 KASSERT(p2->p_stat == SIDL, 831 ("cannot start forked process, bad status: %p", p2)); 832 p2->p_usched->resetpriority(lp2); 833 crit_enter(); 834 p2->p_stat = SACTIVE; 835 lp2->lwp_stat = LSRUN; 836 p2->p_usched->setrunqueue(lp2); 837 crit_exit(); 838 839 /* 840 * Now can be swapped. 841 */ 842 PRELE(lp1->lwp_proc); 843 844 /* 845 * Preserve synchronization semantics of vfork. P_PPWAIT is set in 846 * the child until it has retired the parent's resources. The parent 847 * must wait for the flag to be cleared by the child. 848 * 849 * Interlock the flag/tsleep with atomic ops to avoid unnecessary 850 * p_token conflicts. 851 * 852 * XXX Is this use of an atomic op on a field that is not normally 853 * manipulated with atomic ops ok? 
854 */ 855 while ((pflags = p2->p_flags) & P_PPWAIT) { 856 cpu_ccfence(); 857 tsleep_interlock(lp1->lwp_proc, 0); 858 if (atomic_cmpset_int(&p2->p_flags, pflags, pflags)) 859 tsleep(lp1->lwp_proc, PINTERLOCKED, "ppwait", 0); 860 } 861 } 862 863 /* 864 * procctl (idtype_t idtype, id_t id, int cmd, void *arg) 865 */ 866 int 867 sys_procctl(struct procctl_args *uap) 868 { 869 struct proc *p = curproc; 870 struct proc *p2; 871 struct sysreaper *reap; 872 union reaper_info udata; 873 int error; 874 875 if (uap->idtype != P_PID || uap->id != (id_t)p->p_pid) 876 return EINVAL; 877 878 switch(uap->cmd) { 879 case PROC_REAP_ACQUIRE: 880 lwkt_gettoken(&p->p_token); 881 reap = kmalloc(sizeof(*reap), M_REAPER, M_WAITOK|M_ZERO); 882 if (p->p_reaper == NULL || p->p_reaper->p != p) { 883 reaper_init(p, reap); 884 error = 0; 885 } else { 886 kfree(reap, M_REAPER); 887 error = EALREADY; 888 } 889 lwkt_reltoken(&p->p_token); 890 break; 891 case PROC_REAP_RELEASE: 892 lwkt_gettoken(&p->p_token); 893 release_again: 894 reap = p->p_reaper; 895 KKASSERT(reap != NULL); 896 if (reap->p == p) { 897 reaper_hold(reap); /* in case of thread race */ 898 lockmgr(&reap->lock, LK_EXCLUSIVE); 899 if (reap->p != p) { 900 lockmgr(&reap->lock, LK_RELEASE); 901 reaper_drop(reap); 902 goto release_again; 903 } 904 reap->p = NULL; 905 p->p_reaper = reap->parent; 906 if (p->p_reaper) 907 reaper_hold(p->p_reaper); 908 lockmgr(&reap->lock, LK_RELEASE); 909 reaper_drop(reap); /* our ref */ 910 reaper_drop(reap); /* old p_reaper ref */ 911 error = 0; 912 } else { 913 error = ENOTCONN; 914 } 915 lwkt_reltoken(&p->p_token); 916 break; 917 case PROC_REAP_STATUS: 918 bzero(&udata, sizeof(udata)); 919 lwkt_gettoken_shared(&p->p_token); 920 if ((reap = p->p_reaper) != NULL && reap->p == p) { 921 udata.status.flags = reap->flags; 922 udata.status.refs = reap->refs - 1; /* minus ours */ 923 } 924 p2 = LIST_FIRST(&p->p_children); 925 udata.status.pid_head = p2 ? p2->p_pid : -1; 926 lwkt_reltoken(&p->p_token); 927 928 if (uap->data) { 929 error = copyout(&udata, uap->data, 930 sizeof(udata.status)); 931 } else { 932 error = 0; 933 } 934 break; 935 default: 936 error = EINVAL; 937 break; 938 } 939 return error; 940 } 941 942 /* 943 * Bump ref on reaper, preventing destruction 944 */ 945 void 946 reaper_hold(struct sysreaper *reap) 947 { 948 KKASSERT(reap->refs > 0); 949 refcount_acquire(&reap->refs); 950 } 951 952 /* 953 * Drop ref on reaper, destroy the structure on the 1->0 954 * transition and loop on the parent. 955 */ 956 void 957 reaper_drop(struct sysreaper *next) 958 { 959 struct sysreaper *reap; 960 961 while ((reap = next) != NULL) { 962 if (refcount_release(&reap->refs)) { 963 next = reap->parent; 964 KKASSERT(reap->p == NULL); 965 reap->parent = NULL; 966 kfree(reap, M_REAPER); 967 } else { 968 next = NULL; 969 } 970 } 971 } 972 973 /* 974 * Initialize a static or newly allocated reaper structure 975 */ 976 void 977 reaper_init(struct proc *p, struct sysreaper *reap) 978 { 979 reap->parent = p->p_reaper; 980 reap->p = p; 981 if (p == initproc) { 982 reap->flags = REAPER_STAT_OWNED | REAPER_STAT_REALINIT; 983 reap->refs = 2; 984 } else { 985 reap->flags = REAPER_STAT_OWNED; 986 reap->refs = 1; 987 } 988 lockinit(&reap->lock, "subrp", 0, 0); 989 cpu_sfence(); 990 p->p_reaper = reap; 991 } 992 993 /* 994 * Called with p->p_token held during exit. 995 * 996 * This is a bit simpler than RELEASE because there are no threads remaining 997 * to race. 

/*
 * Called with p->p_token held during exit.
 *
 * This is a bit simpler than RELEASE because there are no threads remaining
 * to race.  We only release if we own the reaper; the exit code will handle
 * the final p_reaper release.
 */
struct sysreaper *
reaper_exit(struct proc *p)
{
	struct sysreaper *reap;

	/*
	 * Release acquired reaper
	 */
	if ((reap = p->p_reaper) != NULL && reap->p == p) {
		lockmgr(&reap->lock, LK_EXCLUSIVE);
		p->p_reaper = reap->parent;
		if (p->p_reaper)
			reaper_hold(p->p_reaper);
		reap->p = NULL;
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
	}

	/*
	 * Return and clear the remaining reaper (at this point reap->p
	 * does not equal p).  The caller is holding p_token for us and
	 * must drop the returned reaper.
	 */
	if ((reap = p->p_reaper) != NULL) {
		p->p_reaper = NULL;
	}
	return reap;
}

/*
 * Return a held (PHOLD) process representing the reaper for process (p).
 * NULL should not normally be returned.  Caller should PRELE() the returned
 * reaper process when finished.
 *
 * Remove dead internal nodes while we are at it.
 *
 * Process (p)'s token must be held on call.
 * The returned process's token is NOT acquired by this routine.
 */
struct proc *
reaper_get(struct sysreaper *reap)
{
	struct sysreaper *next;
	struct proc *reproc;

	if (reap == NULL)
		return NULL;

	/*
	 * Extra hold for loop
	 */
	reaper_hold(reap);

	while (reap) {
		lockmgr(&reap->lock, LK_SHARED);
		if (reap->p) {
			/*
			 * Probable reaper
			 */
			if (reap->p) {
				reproc = reap->p;
				PHOLD(reproc);
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);
				return reproc;
			}

			/*
			 * Raced, try again
			 */
			lockmgr(&reap->lock, LK_RELEASE);
			continue;
		}

		/*
		 * Traverse upwards in the reaper topology, destroy
		 * dead internal nodes when possible.
		 *
		 * NOTE: Our ref on next means that a dead node should
		 *	 have 2 (ours and reap->parent's).
		 */
		next = reap->parent;
		while (next) {
			reaper_hold(next);
			if (next->refs == 2 && next->p == NULL) {
				lockmgr(&reap->lock, LK_RELEASE);
				lockmgr(&reap->lock, LK_EXCLUSIVE);
				if (next->refs == 2 &&
				    reap->parent == next &&
				    next->p == NULL) {
					/*
					 * reap->parent inherits ref from next.
					 */
					reap->parent = next->parent;
					next->parent = NULL;
					reaper_drop(next);	/* ours */
					reaper_drop(next);	/* old parent */
					next = reap->parent;
					continue;	/* possible chain */
				}
			}
			break;
		}
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
		reap = next;
	}
	return NULL;
}
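
/*
 * Illustrative sketch of the topology reaper_get() walks.  Suppose
 * reaper B has exited (B->p == NULL) while its parent A is still owned:
 *
 *	p->p_reaper -> B (dead) -> A (alive) -> ... -> initproc
 *
 * The walk skips B, returns A's process PHOLD'd, and opportunistically
 * frees dead nodes once only the traversal ref and the parent-link ref
 * remain on them.
 */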