/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
static MALLOC_DEFINE(M_REAPER, "reaper", "process reapers");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags,
			const cpumask_t *mask);
static int lwp_create1(struct lwp_params *params,
			const cpumask_t *mask);

int forksleep;		/* Place for fork1() to sleep on. */

/*
 * Red-Black tree support for LWPs
 */

static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
	if (lp1->lwp_tid < lp2->lwp_tid)
		return(-1);
	if (lp1->lwp_tid > lp2->lwp_tid)
		return(1);
	return(0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);

/*
 * fork() system call
 */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}

/*
 * vfork() system call
 */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}

/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new process, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2) {
			PHOLD(p2);
			start_forked_proc(lp, p2);
			uap->sysmsg_fds[0] = p2->p_pid;
			uap->sysmsg_fds[1] = 0;
			PRELE(p2);
		} else {
			uap->sysmsg_fds[0] = 0;
			uap->sysmsg_fds[1] = 0;
		}
	}
	return error;
}
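
/*
 * Illustrative sketch (userland perspective, not compiled here): the
 * standard calls above correspond roughly to the following rfork() flag
 * combinations.  RFPGLOCK is OR'd in by the kernel itself, so callers
 * do not pass it.
 *
 *	rfork(RFPROC | RFFDG);		fork-like; new process, copied
 *					descriptor table
 *	rfork(RFPROC | RFFDG | RFPPWAIT | RFMEM);
 *					vfork-like; shares the vmspace and
 *					blocks the parent
 *	rfork(RFCFDG);			no RFPROC: operates on the current
 *					process itself, closing all of its
 *					descriptors (single-lwp only)
 */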

static int
lwp_create1(struct lwp_params *uprm, const cpumask_t *umask)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	cpumask_t *mask = NULL, mask0;
	int error;

	error = copyin(uprm, &params, sizeof(params));
	if (error)
		goto fail2;

	if (umask != NULL) {
		error = copyin(umask, &mask0, sizeof(mask0));
		if (error)
			goto fail2;
		CPUMASK_ANDMASK(mask0, smp_active_mask);
		if (CPUMASK_TESTNZERO(mask0))
			mask = &mask0;
	}

	lwkt_gettoken(&p->p_token);
	plimit_lwp_fork(p);	/* force exclusive access */
	lp = lwp_fork(curthread->td_lwp, p, RFPROC | RFMEM, mask);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.lwp_tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid1,
			     sizeof(lp->lwp_tid))))
		goto fail;
	if (params.lwp_tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid2,
			     sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();
	lwkt_reltoken(&p->p_token);

	return (0);

fail:
	/*
	 * Make sure no one is using this lwp before it is removed from
	 * the tree.  If we didn't wait here, lwp tree iteration with
	 * a blocking operation would be broken.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpfail", 1);
	lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
	--p->p_nthreads;
	/* lwp_dispose expects an exited lwp, and a held proc */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	lp->lwp_thread->td_flags |= TDF_EXITING;
	lwkt_remove_tdallq(lp->lwp_thread);
	PHOLD(p);
	biosched_done(lp->lwp_thread);
	dsched_exit_thread(lp->lwp_thread);
	lwp_dispose(lp);
	lwkt_reltoken(&p->p_token);
fail2:
	return (error);
}

/*
 * Low level thread creation used by pthreads.
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{
	return (lwp_create1(uap->params, NULL));
}

int
sys_lwp_create2(struct lwp_create2_args *uap)
{
	return (lwp_create1(uap->params, uap->mask));
}
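
/*
 * Usage sketch (hedged): a threading library would typically invoke the
 * syscall along these lines.  lwp_tid1/lwp_tid2 are the copyout targets
 * consumed by lwp_create1() above; the remaining field names are the
 * conventional lwp_params layout and are shown only for illustration.
 *
 *	struct lwp_params params;
 *	lwpid_t tid;
 *
 *	params.lwp_func = thread_entry;		start routine
 *	params.lwp_arg = thread_argument;
 *	params.lwp_stack = stack_top;
 *	params.lwp_tid1 = &tid;			new tid written here
 *	params.lwp_tid2 = NULL;
 *	lwp_create(&params);
 */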

int nprocs = 1;		/* process 0 */

int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2;
	struct proc *pptr;
	struct pgrp *p1grp;
	struct pgrp *plkgrp;
	struct sysreaper *reap;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	lwkt_gettoken(&p1->p_token);
	plkgrp = NULL;
	p2 = NULL;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running.
		 */
		if (p1->p_nthreads != 1) {
			error = EINVAL;
			goto done;
		}

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;

			fdtmp = fdinit(p1);
			fdfree(p1, fdtmp);
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;

				error = fdcopy(p1, &newfd);
				if (error != 0) {
					error = ENOMEM;
					goto done;
				}
				fdfree(p1, newfd);
			}
		}
		*procp = NULL;
		error = 0;
		goto done;
	}

	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't, the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	p1grp = p1->p_pgrp;
	if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
		pgref(plkgrp);
		lockmgr(&plkgrp->pg_lock, LK_SHARED);
		if (CURSIG_NOBLOCK(lp1)) {
			error = ERESTART;
			goto done;
		}
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = lp1->lwp_thread->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	atomic_add_int(&nprocs, 1);

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
			(uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count.
		 */
		atomic_add_int(&nprocs, -1);
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
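
	/*
	 * Worked example of the two limits above (illustrative numbers):
	 * with maxproc = 1000, a non-root uid starts getting EAGAIN once
	 * nprocs reaches 990 (the last ten process slots are reserved for
	 * root), root itself fails at 1000, and a non-root uid also fails
	 * once its own process count reaches RLIMIT_NPROC, whichever
	 * limit is hit first.
	 */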

	/*
	 * Allocate a new process, don't get fancy: zero the structure.
	 */
	p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

	/*
	 * Core initialization.  SIDL is a safety state that protects the
	 * partially initialized process once it starts getting hooked
	 * into system structures and becomes addressable.
	 *
	 * We must be sure to acquire p2->p_token as well, we must hold it
	 * once the process is on the allproc list to avoid things such
	 * as competing modifications to p_flags.
	 */
	mycpu->gd_forkid += ncpus;
	p2->p_forkid = mycpu->gd_forkid + mycpu->gd_cpuid;
	p2->p_lasttid = -1;	/* first tid will be 0 */
	p2->p_stat = SIDL;

	/*
	 * NOTE: Process 0 will not have a reaper, but process 1 (init) and
	 *	 all other processes always will.
	 */
	if ((reap = p1->p_reaper) != NULL) {
		reaper_hold(reap);
		p2->p_reaper = reap;
	} else {
		p2->p_reaper = NULL;
	}

	RB_INIT(&p2->p_lwp_tree);
	spin_init(&p2->p_spin, "procfork1");
	lwkt_token_init(&p2->p_token, "proc");
	lwkt_gettoken(&p2->p_token);

	/*
	 * Setup linkage for kernel based threading XXX lwp.  Also add the
	 * process to the allproc list.
	 *
	 * The process structure is addressable after this point.
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_leader = p2;
	}
	proc_add_allproc(p2);

	/*
	 * Initialize the section which is copied verbatim from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	      ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Duplicate sub-structures as needed.  Increase reference counts
	 * on shared objects.
	 *
	 * NOTE: because we are now on the allproc list it is possible for
	 *	 other consumers to gain temporary references to p2
	 *	 (p2->p_lock can change).
	 */
	if (p1->p_flags & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flags |= P_JAILED;

	if (p2->p_args)
		refcount_acquire(&p2->p_args->ar_ref);

	p2->p_usched = p1->p_usched;
	/* XXX: verify copy of the secondary iosched stuff */
	dsched_enter_proc(p2);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		refcount_acquire(&p2->p_sigacts->ps_refcnt);
	} else {
		p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts),
					M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		refcount_init(&p2->p_sigacts->ps_refcnt, 1);
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/* copy namecache handle to the text file */
	if (p1->p_textnch.mount)
		cache_copy(&p1->p_textnch, &p2->p_textnch);

	/*
	 * Handle file descriptors.
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		error = fdcopy(p1, &p2->p_fd);
		if (error != 0) {
			error = ENOMEM;
			goto done;
		}
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL) {
			p1->p_fdtol = filedesc_to_leader_alloc(NULL,
							       p1->p_leader);
		}
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flags |= p1->p_flags & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT))
		p2->p_flags |= P_CONTROLT;
	if (flags & RFPPWAIT) {
		p2->p_flags |= P_PPWAIT;
		if (p1->p_upmap)
			atomic_add_int(&p1->p_upmap->invfork, 1);
	}

	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	if (p1->p_vkernel)
		vkernel_inherit(p1, p2);

	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	pgref(p1grp);
	lwkt_gettoken(&p1grp->pg_token);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	lwkt_reltoken(&p1grp->pg_token);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of the reaper (typically init).  This effectively disassociates
	 * the child from the parent.
	 *
	 * Temporarily hold pptr for the RFNOWAIT case to avoid ripouts.
	 */
	if (flags & RFNOWAIT) {
		pptr = reaper_get(reap);
		if (pptr == NULL) {
			pptr = initproc;
			PHOLD(pptr);
		}
	} else {
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INIT(&p2->p_children);

	lwkt_gettoken(&pptr->p_token);
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	lwkt_reltoken(&pptr->p_token);

	if (flags & RFNOWAIT)
		PRELE(pptr);

	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init_mp(&p2->p_ithandle);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);

	/*
	 * Create the first lwp associated with the new proc.  It will
	 * return via a different execution path later, directly into
	 * userland, after it was put on the runq by start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags, NULL);

	if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
					     p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
						p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 * What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	error = 0;
done:
	if (p2)
		lwkt_reltoken(&p2->p_token);
	lwkt_reltoken(&p1->p_token);
	if (plkgrp) {
		lockmgr(&plkgrp->pg_lock, LK_RELEASE);
		pgrel(plkgrp);
	}
	return (error);
}
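
/*
 * Note on the fork1() interface, mirroring sys_fork() above: on success
 * *procp is returned with p_stat still SIDL and the caller is responsible
 * for making the new process runnable, typically:
 *
 *	error = fork1(lp, flags, &p2);
 *	if (error == 0) {
 *		PHOLD(p2);
 *		start_forked_proc(lp, p2);
 *		PRELE(p2);
 *	}
 */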

static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags,
	 const cpumask_t *mask)
{
	globaldata_t gd = mycpu;
	struct lwp *lp;
	struct thread *td;

	lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

	lp->lwp_proc = destproc;
	lp->lwp_vmspace = destproc->p_vmspace;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	      (unsigned) ((caddr_t)&lp->lwp_endcopy -
			  (caddr_t)&lp->lwp_startcopy));
	if (mask != NULL)
		lp->lwp_cpumask = *mask;

	/*
	 * Reset the sigaltstack if memory is shared, otherwise inherit
	 * it.
	 */
	if (flags & RFMEM) {
		lp->lwp_sigstk.ss_flags = SS_DISABLE;
		lp->lwp_sigstk.ss_size = 0;
		lp->lwp_sigstk.ss_sp = NULL;
		lp->lwp_flags &= ~LWP_ALTSTACK;
	} else {
		lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;
	}

	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();
	CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask);
	lwkt_token_init(&lp->lwp_token, "lwp_token");
	spin_init(&lp->lwp_spin, "lwptoken");

	/*
	 * Assign the thread to the current cpu to begin with so we
	 * can manipulate it.
	 */
	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
	lp->lwp_thread = td;
	td->td_ucred = crhold(destproc->p_ucred);
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
#ifdef NO_LWKT_SPLIT_USERPRI
	lwkt_setpri(td, TDPRI_USER_NORM);
#else
	lwkt_setpri(td, TDPRI_KERN_USER);
#endif
	lwkt_set_comm(td, "%s", destproc->p_comm);

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	kqueue_init(&lp->lwp_kqueue, destproc->p_fd);

	/*
	 * Assign a TID to the lp.  Loop until the insert succeeds (returns
	 * NULL).
	 *
	 * If we are in a vfork assign the same TID as the lwp that did the
	 * vfork().  This way if the user program messes around with
	 * pthread calls inside the vfork(), it will operate like an
	 * extension of the (blocked) parent.  Also note that since the
	 * address space is being shared, insofar as pthreads is concerned,
	 * the code running in the vfork() is part of the original process.
	 */
	if (flags & RFPPWAIT) {
		lp->lwp_tid = origlp->lwp_tid - 1;
	} else {
		lp->lwp_tid = destproc->p_lasttid;
	}

	do {
		if (++lp->lwp_tid < 0)
			lp->lwp_tid = 1;
	} while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);

	destproc->p_lasttid = lp->lwp_tid;
	destproc->p_nthreads++;

	/*
	 * This flag is set and never cleared.  It means that the process
	 * was threaded at some point.  Used to improve exit performance.
	 */
	destproc->p_flags |= P_MAYBETHREADED;

	return (lp);
}
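
/*
 * Worked example of the tid assignment loop above: a new process has
 * p_lasttid == -1, so its first (and usually only) lwp gets tid 0, and
 * subsequent lwps get 1, 2, 3, ... until the pre-increment overflows
 * negative, at which point allocation wraps to tid 1 (tid 0 is never
 * reused) and the RB-tree insert rejects any tid still in use, advancing
 * until a free one is found.  In the vfork (RFPPWAIT) case the child lwp
 * deliberately receives origlp->lwp_tid: the initial decrement cancels
 * against the loop's pre-increment, and the insert succeeds because the
 * new process's lwp tree is still empty.
 */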

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list,
 * after first making sure that the entry is not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
			function);
	}
#endif
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return(1);
		}
	}
	return (0);
}
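
/*
 * Usage sketch (illustrative, hypothetical callback name): a loadable
 * module that wants per-fork work registers a forklist_fn at load time
 * and removes it at unload.  The callback signature matches the
 * invocation in fork1() above: (*function)(p1, p2, flags).
 *
 *	static void
 *	mymod_fork_hook(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		... inspect or adjust the new process p2 ...
 *	}
 *
 *	at_fork(mymod_fork_hook);	during MOD_LOAD
 *	rm_at_fork(mymod_fork_hook);	during MOD_UNLOAD
 */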

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 *
 * p2 is held by the caller.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);
	int pflags;

	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
		("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);

	/*
	 * Preserve synchronization semantics of vfork.  P_PPWAIT is set in
	 * the child until it has retired the parent's resources.  The parent
	 * must wait for the flag to be cleared by the child.
	 *
	 * Interlock the flag/tsleep with atomic ops to avoid unnecessary
	 * p_token conflicts.
	 *
	 * XXX Is this use of an atomic op on a field that is not normally
	 *     manipulated with atomic ops ok?
	 */
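	/*
	 * The atomic_cmpset_int() below does not modify p_flags (the old
	 * and new values are identical).  It only re-verifies that the
	 * flags are unchanged after tsleep_interlock() has been armed, so
	 * a wakeup issued between the flag test and the tsleep() cannot
	 * be lost; if P_PPWAIT was cleared in the meantime the cmpset
	 * fails and the loop re-tests.
	 */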
	while ((pflags = p2->p_flags) & P_PPWAIT) {
		cpu_ccfence();
		tsleep_interlock(lp1->lwp_proc, 0);
		if (atomic_cmpset_int(&p2->p_flags, pflags, pflags))
			tsleep(lp1->lwp_proc, PINTERLOCKED, "ppwait", 0);
	}
}

/*
 * procctl (idtype_t idtype, id_t id, int cmd, void *arg)
 */
int
sys_procctl(struct procctl_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	struct sysreaper *reap;
	union reaper_info udata;
	int error;

	if (uap->idtype != P_PID || uap->id != (id_t)p->p_pid)
		return EINVAL;

	switch(uap->cmd) {
	case PROC_REAP_ACQUIRE:
		lwkt_gettoken(&p->p_token);
		reap = kmalloc(sizeof(*reap), M_REAPER, M_WAITOK|M_ZERO);
		if (p->p_reaper == NULL || p->p_reaper->p != p) {
			reaper_init(p, reap);
			error = 0;
		} else {
			kfree(reap, M_REAPER);
			error = EALREADY;
		}
		lwkt_reltoken(&p->p_token);
		break;
	case PROC_REAP_RELEASE:
		lwkt_gettoken(&p->p_token);
release_again:
		reap = p->p_reaper;
		KKASSERT(reap != NULL);
		if (reap->p == p) {
			reaper_hold(reap);	/* in case of thread race */
			lockmgr(&reap->lock, LK_EXCLUSIVE);
			if (reap->p != p) {
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);
				goto release_again;
			}
			reap->p = NULL;
			p->p_reaper = reap->parent;
			if (p->p_reaper)
				reaper_hold(p->p_reaper);
			lockmgr(&reap->lock, LK_RELEASE);
			reaper_drop(reap);	/* our ref */
			reaper_drop(reap);	/* old p_reaper ref */
			error = 0;
		} else {
			error = ENOTCONN;
		}
		lwkt_reltoken(&p->p_token);
		break;
	case PROC_REAP_STATUS:
		bzero(&udata, sizeof(udata));
		lwkt_gettoken_shared(&p->p_token);
		if ((reap = p->p_reaper) != NULL && reap->p == p) {
			udata.status.flags = reap->flags;
			udata.status.refs = reap->refs - 1; /* minus ours */
		}
		p2 = LIST_FIRST(&p->p_children);
		udata.status.pid_head = p2 ? p2->p_pid : -1;
		lwkt_reltoken(&p->p_token);

		if (uap->data) {
			error = copyout(&udata, uap->data,
					sizeof(udata.status));
		} else {
			error = 0;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/*
 * Bump ref on reaper, preventing destruction.
 */
void
reaper_hold(struct sysreaper *reap)
{
	KKASSERT(reap->refs > 0);
	refcount_acquire(&reap->refs);
}

/*
 * Drop ref on reaper, destroy the structure on the 1->0
 * transition and loop on the parent.
 */
void
reaper_drop(struct sysreaper *next)
{
	struct sysreaper *reap;

	while ((reap = next) != NULL) {
		if (refcount_release(&reap->refs)) {
			next = reap->parent;
			KKASSERT(reap->p == NULL);
			reap->parent = NULL;
			kfree(reap, M_REAPER);
		} else {
			next = NULL;
		}
	}
}

/*
 * Initialize a static or newly allocated reaper structure.
 */
void
reaper_init(struct proc *p, struct sysreaper *reap)
{
	reap->parent = p->p_reaper;
	reap->p = p;
	if (p == initproc) {
		reap->flags = REAPER_STAT_OWNED | REAPER_STAT_REALINIT;
		reap->refs = 2;
	} else {
		reap->flags = REAPER_STAT_OWNED;
		reap->refs = 1;
	}
	lockinit(&reap->lock, "subrp", 0, 0);
	cpu_sfence();
	p->p_reaper = reap;
}
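
/*
 * Userland usage sketch (illustrative): a service manager makes itself
 * the reaper for its subtree and can then poll the subtree state.  The
 * idtype/id pair must name the calling process itself, as enforced in
 * sys_procctl() above.
 *
 *	union reaper_info info;
 *
 *	procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL);
 *	procctl(P_PID, getpid(), PROC_REAP_STATUS, &info);
 *	... info.status.refs, info.status.pid_head ...
 *	procctl(P_PID, getpid(), PROC_REAP_RELEASE, NULL);
 */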
We only release if we own the reaper, the exit code will handle 1028 * the final p_reaper release. 1029 */ 1030 struct sysreaper * 1031 reaper_exit(struct proc *p) 1032 { 1033 struct sysreaper *reap; 1034 1035 /* 1036 * Release acquired reaper 1037 */ 1038 if ((reap = p->p_reaper) != NULL && reap->p == p) { 1039 lockmgr(&reap->lock, LK_EXCLUSIVE); 1040 p->p_reaper = reap->parent; 1041 if (p->p_reaper) 1042 reaper_hold(p->p_reaper); 1043 reap->p = NULL; 1044 lockmgr(&reap->lock, LK_RELEASE); 1045 reaper_drop(reap); 1046 } 1047 1048 /* 1049 * Return and clear reaper (caller is holding p_token for us) 1050 * (reap->p does not equal p). Caller must drop it. 1051 */ 1052 if ((reap = p->p_reaper) != NULL) { 1053 p->p_reaper = NULL; 1054 } 1055 return reap; 1056 } 1057 1058 /* 1059 * Return a held (PHOLD) process representing the reaper for process (p). 1060 * NULL should not normally be returned. Caller should PRELE() the returned 1061 * reaper process when finished. 1062 * 1063 * Remove dead internal nodes while we are at it. 1064 * 1065 * Process (p)'s token must be held on call. 1066 * The returned process's token is NOT acquired by this routine. 1067 */ 1068 struct proc * 1069 reaper_get(struct sysreaper *reap) 1070 { 1071 struct sysreaper *next; 1072 struct proc *reproc; 1073 1074 if (reap == NULL) 1075 return NULL; 1076 1077 /* 1078 * Extra hold for loop 1079 */ 1080 reaper_hold(reap); 1081 1082 while (reap) { 1083 lockmgr(&reap->lock, LK_SHARED); 1084 if (reap->p) { 1085 /* 1086 * Probable reaper 1087 */ 1088 if (reap->p) { 1089 reproc = reap->p; 1090 PHOLD(reproc); 1091 lockmgr(&reap->lock, LK_RELEASE); 1092 reaper_drop(reap); 1093 return reproc; 1094 } 1095 1096 /* 1097 * Raced, try again 1098 */ 1099 lockmgr(&reap->lock, LK_RELEASE); 1100 continue; 1101 } 1102 1103 /* 1104 * Traverse upwards in the reaper topology, destroy 1105 * dead internal nodes when possible. 1106 * 1107 * NOTE: Our ref on next means that a dead node should 1108 * have 2 (ours and reap->parent's). 1109 */ 1110 next = reap->parent; 1111 while (next) { 1112 reaper_hold(next); 1113 if (next->refs == 2 && next->p == NULL) { 1114 lockmgr(&reap->lock, LK_RELEASE); 1115 lockmgr(&reap->lock, LK_EXCLUSIVE); 1116 if (next->refs == 2 && 1117 reap->parent == next && 1118 next->p == NULL) { 1119 /* 1120 * reap->parent inherits ref from next. 1121 */ 1122 reap->parent = next->parent; 1123 next->parent = NULL; 1124 reaper_drop(next); /* ours */ 1125 reaper_drop(next); /* old parent */ 1126 next = reap->parent; 1127 continue; /* possible chain */ 1128 } 1129 } 1130 break; 1131 } 1132 lockmgr(&reap->lock, LK_RELEASE); 1133 reaper_drop(reap); 1134 reap = next; 1135 } 1136 return NULL; 1137 } 1138