/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_fork.c      8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/lwp.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
static MALLOC_DEFINE(M_REAPER, "reaper", "process reapers");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
        forklist_fn function;
        TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags,
                        const cpumask_t *mask);
static int lwp_create1(struct lwp_params *params,
                        const cpumask_t *mask);
static struct lock reaper_lock = LOCK_INITIALIZER("reapgl", 0, 0);

int forksleep;                  /* Place for fork1() to sleep on. */

/*
 * Red-Black tree support for LWPs
 */

static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
        if (lp1->lwp_tid < lp2->lwp_tid)
                return(-1);
        if (lp1->lwp_tid > lp2->lwp_tid)
                return(1);
        return(0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);

/*
 * When forking, memory underpinning umtx-supported mutexes may be set
 * COW, causing the physical address to change.  We must wake up any threads
 * blocked on the physical address to allow them to re-resolve their VM.
 *
 * (caller is holding p->p_token)
 */
static void
wake_umtx_threads(struct proc *p1)
{
        struct lwp *lp;
        struct thread *td;

        RB_FOREACH(lp, lwp_rb_tree, &p1->p_lwp_tree) {
                td = lp->lwp_thread;
                if (td && (td->td_flags & TDF_TSLEEPQ) &&
                    (td->td_wdomain & PDOMAIN_MASK) == PDOMAIN_UMTX) {
                        wakeup_domain(td->td_wchan, PDOMAIN_UMTX);
                }
        }
}

/*
 * fork() system call
 */
int
sys_fork(struct fork_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p2;
        int error;

        error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
        if (error == 0) {
                PHOLD(p2);
                start_forked_proc(lp, p2);
                uap->sysmsg_fds[0] = p2->p_pid;
                uap->sysmsg_fds[1] = 0;
                PRELE(p2);
        }
        return error;
}

/*
 * vfork() system call
 */
int
sys_vfork(struct vfork_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p2;
        int error;

        error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
        if (error == 0) {
                PHOLD(p2);
                start_forked_proc(lp, p2);
                uap->sysmsg_fds[0] = p2->p_pid;
                uap->sysmsg_fds[1] = 0;
                PRELE(p2);
        }
        return error;
}
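
/*
 * Editorial sketch, not part of the original source: fork() and vfork()
 * above are thin wrappers that pass fixed RF* flag sets to fork1(), and
 * rfork() (below) exposes most of those flags directly.  The userland
 * fragment is kept under #if 0 so it is never compiled into the kernel;
 * it assumes the rfork(2) prototype from <unistd.h> and uses the RF*
 * constants from <sys/unistd.h>.  Flags in RFKERNELONLY are rejected by
 * sys_rfork() and are not shown.
 */
#if 0
#include <sys/types.h>
#include <sys/unistd.h>         /* RFPROC, RFFDG, RFNOWAIT, ... */
#include <unistd.h>
#include <err.h>

int
main(void)
{
        pid_t pid;

        /* Equivalent to fork(): new process with a copied descriptor table. */
        pid = rfork(RFPROC | RFFDG);
        if (pid == -1)
                err(1, "rfork");
        if (pid == 0)
                _exit(0);                       /* child */

        /*
         * Omitting RFFDG shares the descriptor table with the parent;
         * adding RFNOWAIT hands the child to the process reaper (or init)
         * so the parent never has to wait() for it.
         */
        pid = rfork(RFPROC | RFNOWAIT);
        if (pid == -1)
                err(1, "rfork");
        if (pid == 0)
                _exit(0);                       /* child */
        return 0;
}
#endif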

/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new process, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p2;
        int error;

        if ((uap->flags & RFKERNELONLY) != 0)
                return (EINVAL);

        error = fork1(lp, uap->flags | RFPGLOCK, &p2);
        if (error == 0) {
                if (p2) {
                        PHOLD(p2);
                        start_forked_proc(lp, p2);
                        uap->sysmsg_fds[0] = p2->p_pid;
                        uap->sysmsg_fds[1] = 0;
                        PRELE(p2);
                } else {
                        uap->sysmsg_fds[0] = 0;
                        uap->sysmsg_fds[1] = 0;
                }
        }
        return error;
}

static int
lwp_create1(struct lwp_params *uprm, const cpumask_t *umask)
{
        struct proc *p = curproc;
        struct lwp *lp;
        struct lwp_params params;
        cpumask_t *mask = NULL, mask0;
        int error;

        error = copyin(uprm, &params, sizeof(params));
        if (error)
                goto fail2;

        if (umask != NULL) {
                error = copyin(umask, &mask0, sizeof(mask0));
                if (error)
                        goto fail2;
                CPUMASK_ANDMASK(mask0, smp_active_mask);
                if (CPUMASK_TESTNZERO(mask0))
                        mask = &mask0;
        }

        lwkt_gettoken(&p->p_token);
        plimit_lwp_fork(p);     /* force exclusive access */
        lp = lwp_fork(curthread->td_lwp, p, RFPROC | RFMEM, mask);
        error = cpu_prepare_lwp(lp, &params);
        if (error)
                goto fail;
        if (params.lwp_tid1 != NULL &&
            (error = copyout(&lp->lwp_tid, params.lwp_tid1, sizeof(lp->lwp_tid))))
                goto fail;
        if (params.lwp_tid2 != NULL &&
            (error = copyout(&lp->lwp_tid, params.lwp_tid2, sizeof(lp->lwp_tid))))
                goto fail;

        /*
         * Now schedule the new lwp.
         */
        p->p_usched->resetpriority(lp);
        crit_enter();
        lp->lwp_stat = LSRUN;
        p->p_usched->setrunqueue(lp);
        crit_exit();
        lwkt_reltoken(&p->p_token);

        return (0);

fail:
        /*
         * Make sure no one is using this lwp before it is removed from
         * the tree.  If we did not wait here, lwp tree iteration with
         * blocking operations would be broken.
         */
        while (lp->lwp_lock > 0)
                tsleep(lp, 0, "lwpfail", 1);
        lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
        --p->p_nthreads;
        /* lwp_dispose expects an exited lwp, and a held proc */
        atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
        lp->lwp_thread->td_flags |= TDF_EXITING;
        lwkt_remove_tdallq(lp->lwp_thread);
        PHOLD(p);
        biosched_done(lp->lwp_thread);
        dsched_exit_thread(lp->lwp_thread);
        lwp_dispose(lp);
        lwkt_reltoken(&p->p_token);
fail2:
        return (error);
}
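
/*
 * Editorial sketch, not part of the original source: a minimal userland
 * caller of lwp_create(2).  Only the lwp_tid1/lwp_tid2 copyout behaviour
 * is confirmed by lwp_create1() above; the lwp_func/lwp_arg/lwp_stack
 * member names, the extern prototype, and the stack handling are
 * assumptions about the userland ABI, so treat this purely as an
 * illustration.  Never compiled into the kernel (#if 0).
 */
#if 0
#include <sys/types.h>
#include <sys/lwp.h>            /* struct lwp_params, lwpid_t (assumed) */
#include <stdlib.h>
#include <err.h>

extern int lwp_create(struct lwp_params *);     /* assumed prototype */

static lwpid_t newtid_parent;   /* written for the creating thread */
static lwpid_t newtid_child;    /* written for the new thread */

static void
thread_entry(void *arg)
{
        /* runs on the stack supplied below; real code would eventually exit */
        for (;;)
                ;
}

static void
spawn_lwp(void)
{
        struct lwp_params params;
        void *stack;

        if ((stack = malloc(256 * 1024)) == NULL)
                err(1, "malloc");

        params.lwp_func = thread_entry;                 /* assumed member */
        params.lwp_arg = NULL;                          /* assumed member */
        params.lwp_stack = (char *)stack + 256 * 1024;  /* assumed member */
        params.lwp_tid1 = &newtid_parent;       /* confirmed by lwp_create1() */
        params.lwp_tid2 = &newtid_child;        /* confirmed by lwp_create1() */

        if (lwp_create(&params) != 0)
                err(1, "lwp_create");
        /* Both TID variables now hold the new lwp's TID (>= 1). */
}
#endif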

/*
 * Low level thread create used by pthreads.
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{

        return (lwp_create1(uap->params, NULL));
}

int
sys_lwp_create2(struct lwp_create2_args *uap)
{

        return (lwp_create1(uap->params, uap->mask));
}

int     nprocs = 1;             /* process 0 */

int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
        struct proc *p1 = lp1->lwp_proc;
        struct proc *p2;
        struct proc *pptr;
        struct pgrp *p1grp;
        struct pgrp *plkgrp;
        struct sysreaper *reap;
        uid_t uid;
        int ok, error;
        static int curfail = 0;
        static struct timeval lastfail;
        struct forklist *ep;
        struct filedesc_to_leader *fdtol;

        if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
                return (EINVAL);

        lwkt_gettoken(&p1->p_token);
        plkgrp = NULL;
        p2 = NULL;

        /*
         * Here we don't create a new process, but we divorce
         * certain parts of a process from itself.
         */
        if ((flags & RFPROC) == 0) {
                /*
                 * This kind of stunt does not work anymore if
                 * there are native threads (lwps) running
                 */
                if (p1->p_nthreads != 1) {
                        error = EINVAL;
                        goto done;
                }

                vm_fork(p1, 0, flags);
                if ((flags & RFMEM) == 0)
                        wake_umtx_threads(p1);

                /*
                 * Close all file descriptors.
                 */
                if (flags & RFCFDG) {
                        struct filedesc *fdtmp;
                        fdtmp = fdinit(p1);
                        fdfree(p1, fdtmp);
                }

                /*
                 * Unshare file descriptors (from parent).
                 */
                if (flags & RFFDG) {
                        if (p1->p_fd->fd_refcnt > 1) {
                                struct filedesc *newfd;
                                error = fdcopy(p1, &newfd);
                                if (error != 0) {
                                        error = ENOMEM;
                                        goto done;
                                }
                                fdfree(p1, newfd);
                        }
                }
                *procp = NULL;
                error = 0;
                goto done;
        }

        /*
         * Interlock against process group signal delivery.  If signals
         * are pending after the interlock is obtained we have to restart
         * the system call to process the signals.  If we don't, the child
         * can miss a pgsignal (such as ^C) sent during the fork.
         *
         * We can't use CURSIG() here because it will process any STOPs
         * and cause the process group lock to be held indefinitely.  If
         * a STOP occurs, the fork will be restarted after the CONT.
         */
        p1grp = p1->p_pgrp;
        if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
                pgref(plkgrp);
                lockmgr(&plkgrp->pg_lock, LK_SHARED);
                if (CURSIG_NOBLOCK(lp1)) {
                        error = ERESTART;
                        goto done;
                }
        }

        /*
         * Although process entries are dynamically created, we still keep
         * a global limit on the maximum number we will create.  Don't allow
         * a nonprivileged user to use the last ten processes; don't let root
         * exceed the limit.  The variable nprocs is the current number of
         * processes, maxproc is the limit.
         */
        uid = lp1->lwp_thread->td_ucred->cr_ruid;
        if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
                if (ppsratecheck(&lastfail, &curfail, 1))
                        kprintf("maxproc limit exceeded by uid %d, please "
                                "see tuning(7) and login.conf(5).\n", uid);
                tsleep(&forksleep, 0, "fork", hz / 2);
                error = EAGAIN;
                goto done;
        }

        /*
         * Increment the nprocs resource before blocking can occur.  There
         * are hard-limits as to the number of processes that can run.
         */
        atomic_add_int(&nprocs, 1);

        /*
         * Increment the count of procs running with this uid.  This also
         * applies to root.
         */
        ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
                        plimit_getadjvalue(RLIMIT_NPROC));
        if (!ok) {
                /*
                 * Back out the process count
                 */
                atomic_add_int(&nprocs, -1);
                if (ppsratecheck(&lastfail, &curfail, 1)) {
                        kprintf("maxproc limit of %jd "
                                "exceeded by \"%s\" uid %d, "
                                "please see tuning(7) and login.conf(5).\n",
                                plimit_getadjvalue(RLIMIT_NPROC),
                                p1->p_comm,
                                uid);
                }
                tsleep(&forksleep, 0, "fork", hz / 2);
                error = EAGAIN;
                goto done;
        }

        /*
         * Allocate a new process; don't get fancy: zero the structure.
         */
        p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

        /*
         * Core initialization.  SIDL is a safety state that protects the
         * partially initialized process once it starts getting hooked
         * into system structures and becomes addressable.
         *
         * We must be sure to acquire p2->p_token as well; we must hold it
         * once the process is on the allproc list to avoid things such
         * as competing modifications to p_flags.
         */
        mycpu->gd_forkid += ncpus;
        p2->p_forkid = mycpu->gd_forkid + mycpu->gd_cpuid;
        p2->p_lasttid = 0;      /* first tid will be 1 */
        p2->p_stat = SIDL;

        /*
         * NOTE: Process 0 will not have a reaper, but process 1 (init) and
         *       all other processes always will.
         */
        if ((reap = p1->p_reaper) != NULL) {
                reaper_hold(reap);
                p2->p_reaper = reap;
        } else {
                p2->p_reaper = NULL;
        }

        RB_INIT(&p2->p_lwp_tree);
        spin_init(&p2->p_spin, "procfork1");
        lwkt_token_init(&p2->p_token, "proc");
        lwkt_gettoken(&p2->p_token);
        p2->p_uidpcpu = kmalloc(sizeof(*p2->p_uidpcpu) * ncpus,
                                M_SUBPROC, M_WAITOK | M_ZERO);

        /*
         * Setup linkage for kernel based threading XXX lwp.  Also add the
         * process to the allproclist.
         *
         * The process structure is addressable after this point.
         */
        if (flags & RFTHREAD) {
                p2->p_peers = p1->p_peers;
                p1->p_peers = p2;
                p2->p_leader = p1->p_leader;
        } else {
                p2->p_leader = p2;
        }
        proc_add_allproc(p2);

        /*
         * Initialize the section which is copied verbatim from the parent.
         */
        bcopy(&p1->p_startcopy, &p2->p_startcopy,
              ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

        /*
         * Duplicate sub-structures as needed.  Increase reference counts
         * on shared objects.
         *
         * NOTE: because we are now on the allproc list it is possible for
         *       other consumers to gain temporary references to p2
         *       (p2->p_lock can change).
         */
        if (p1->p_flags & P_PROFIL)
                startprofclock(p2);
        p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

        if (jailed(p2->p_ucred))
                p2->p_flags |= P_JAILED;

        if (p2->p_args)
                refcount_acquire(&p2->p_args->ar_ref);

        p2->p_usched = p1->p_usched;
        /* XXX: verify copy of the secondary iosched stuff */
        dsched_enter_proc(p2);

        if (flags & RFSIGSHARE) {
                p2->p_sigacts = p1->p_sigacts;
                refcount_acquire(&p2->p_sigacts->ps_refcnt);
        } else {
                p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts),
                                        M_SUBPROC, M_WAITOK);
                bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
                refcount_init(&p2->p_sigacts->ps_refcnt, 1);
        }
        if (flags & RFLINUXTHPN)
                p2->p_sigparent = SIGUSR1;
        else
                p2->p_sigparent = SIGCHLD;

        /* bump references to the text vnode (for procfs) */
        p2->p_textvp = p1->p_textvp;
        if (p2->p_textvp)
                vref(p2->p_textvp);

        /* copy namecache handle to the text file */
        if (p1->p_textnch.mount)
                cache_copy(&p1->p_textnch, &p2->p_textnch);

        /*
         * Handle file descriptors
         */
        if (flags & RFCFDG) {
                p2->p_fd = fdinit(p1);
                fdtol = NULL;
        } else if (flags & RFFDG) {
                error = fdcopy(p1, &p2->p_fd);
                if (error != 0) {
                        error = ENOMEM;
                        goto done;
                }
                fdtol = NULL;
        } else {
                p2->p_fd = fdshare(p1);
                if (p1->p_fdtol == NULL) {
                        p1->p_fdtol = filedesc_to_leader_alloc(NULL,
                                                               p1->p_leader);
                }
                if ((flags & RFTHREAD) != 0) {
                        /*
                         * Shared file descriptor table and
                         * shared process leaders.
                         */
                        fdtol = p1->p_fdtol;
                        fdtol->fdl_refcount++;
                } else {
                        /*
                         * Shared file descriptor table, and
                         * different process leaders
                         */
                        fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
                }
        }
        p2->p_fdtol = fdtol;
        p2->p_limit = plimit_fork(p1);

        /*
         * Adjust depth for resource downscaling
         */
        if ((p2->p_depth & 31) != 31)
                ++p2->p_depth;

        /*
         * Preserve some more flags in subprocess.  P_PROFIL has already
         * been preserved.
         */
        p2->p_flags |= p1->p_flags & P_SUGID;
        if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT))
                p2->p_flags |= P_CONTROLT;
        if (flags & RFPPWAIT) {
                p2->p_flags |= P_PPWAIT;
                if (p1->p_upmap)
                        atomic_add_int(&p1->p_upmap->invfork, 1);
        }

        /*
         * Inherit the virtual kernel structure (allows a virtual kernel
         * to fork to simulate multiple cpus).
         */
        if (p1->p_vkernel)
                vkernel_inherit(p1, p2);

        /*
         * Once we are on a pglist we may receive signals.  XXX we might
         * race a ^C being sent to the process group by not receiving it
         * at all prior to this line.
         */
        pgref(p1grp);
        lwkt_gettoken(&p1grp->pg_token);
        LIST_INSERT_AFTER(p1, p2, p_pglist);
        lwkt_reltoken(&p1grp->pg_token);

        /*
         * Attach the new process to its parent.
         *
         * If RFNOWAIT is set, the newly created process becomes a child
         * of the reaper (typically init).  This effectively disassociates
         * the child from the parent.
         *
         * Temporarily hold pptr for the RFNOWAIT case to avoid ripouts.
         */
        if (flags & RFNOWAIT) {
                pptr = reaper_get(reap);
                if (pptr == NULL) {
                        pptr = initproc;
                        PHOLD(pptr);
                }
        } else {
                pptr = p1;
        }
        p2->p_pptr = pptr;
        p2->p_ppid = pptr->p_pid;
        LIST_INIT(&p2->p_children);

        lwkt_gettoken(&pptr->p_token);
        LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
        lwkt_reltoken(&pptr->p_token);

        if (flags & RFNOWAIT)
                PRELE(pptr);

        varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
        callout_init_mp(&p2->p_ithandle);

#ifdef KTRACE
        /*
         * Copy traceflag and tracefile if enabled.  If not inherited,
         * these were zeroed above but we still could have a trace race
         * so make sure p2's p_tracenode is NULL.
         */
        if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
                p2->p_traceflag = p1->p_traceflag;
                p2->p_tracenode = ktrinherit(p1->p_tracenode);
        }
#endif

        /*
         * This begins the section where we must prevent the parent
         * from being swapped.
         *
         * Gets PRELE'd in the caller in start_forked_proc().
         */
        PHOLD(p1);

        vm_fork(p1, p2, flags);
        if ((flags & RFMEM) == 0)
                wake_umtx_threads(p1);

        /*
         * Create the first lwp associated with the new proc.  It will
         * return via a different execution path later, directly into
         * userland, after it was put on the runq by start_forked_proc().
         */
        lwp_fork(lp1, p2, flags, NULL);

        if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
                mycpu->gd_cnt.v_forks++;
                mycpu->gd_cnt.v_forkpages += btoc(p2->p_vmspace->vm_dsize) +
                                             btoc(p2->p_vmspace->vm_ssize);
        } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
                mycpu->gd_cnt.v_vforks++;
                mycpu->gd_cnt.v_vforkpages += btoc(p2->p_vmspace->vm_dsize) +
                                              btoc(p2->p_vmspace->vm_ssize);
        } else if (p1 == &proc0) {
                mycpu->gd_cnt.v_kthreads++;
                mycpu->gd_cnt.v_kthreadpages += btoc(p2->p_vmspace->vm_dsize) +
                                                btoc(p2->p_vmspace->vm_ssize);
        } else {
                mycpu->gd_cnt.v_rforks++;
                mycpu->gd_cnt.v_rforkpages += btoc(p2->p_vmspace->vm_dsize) +
                                              btoc(p2->p_vmspace->vm_ssize);
        }

        /*
         * Both processes are set up, now check if any loadable modules want
         * to adjust anything.
         * What if they have an error? XXX
         */
        TAILQ_FOREACH(ep, &fork_list, next) {
                (*ep->function)(p1, p2, flags);
        }

        /*
         * Set the start time.  Note that the process is not runnable.  The
         * caller is responsible for making it runnable.
         */
        microtime(&p2->p_start);
        p2->p_acflag = AFORK;

        /*
         * tell any interested parties about the new process
         */
        KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

        /*
         * Return child proc pointer to parent.
         */
        *procp = p2;
        error = 0;
done:
        if (p2)
                lwkt_reltoken(&p2->p_token);
        lwkt_reltoken(&p1->p_token);
        if (plkgrp) {
                lockmgr(&plkgrp->pg_lock, LK_RELEASE);
                pgrel(plkgrp);
        }
        return (error);
}

static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags,
         const cpumask_t *mask)
{
        globaldata_t gd = mycpu;
        struct lwp *lp;
        struct thread *td;

        lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

        lp->lwp_proc = destproc;
        lp->lwp_vmspace = destproc->p_vmspace;
        lp->lwp_stat = LSRUN;
        bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
              (unsigned) ((caddr_t)&lp->lwp_endcopy -
                          (caddr_t)&lp->lwp_startcopy));
        if (mask != NULL)
                lp->lwp_cpumask = *mask;

        /*
         * Reset the sigaltstack if memory is shared, otherwise inherit
         * it.
         */
        if (flags & RFMEM) {
                lp->lwp_sigstk.ss_flags = SS_DISABLE;
                lp->lwp_sigstk.ss_size = 0;
                lp->lwp_sigstk.ss_sp = NULL;
                lp->lwp_flags &= ~LWP_ALTSTACK;
        } else {
                lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;
        }

        /*
         * Set cpbase to the last timeout that occurred (not the upcoming
         * timeout).
         *
         * A critical section is required since a timer IPI can update
         * scheduler specific data.
         */
        crit_enter();
        lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
        destproc->p_usched->heuristic_forking(origlp, lp);
        crit_exit();
        CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask);
        lwkt_token_init(&lp->lwp_token, "lwp_token");
        spin_init(&lp->lwp_spin, "lwptoken");

        /*
         * Assign the thread to the current cpu to begin with so we
         * can manipulate it.
         */
        td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
        lp->lwp_thread = td;
        td->td_wakefromcpu = gd->gd_cpuid;
        td->td_ucred = crhold(destproc->p_ucred);
        td->td_proc = destproc;
        td->td_lwp = lp;
        td->td_switch = cpu_heavy_switch;
#ifdef NO_LWKT_SPLIT_USERPRI
        lwkt_setpri(td, TDPRI_USER_NORM);
#else
        lwkt_setpri(td, TDPRI_KERN_USER);
#endif
        lwkt_set_comm(td, "%s", destproc->p_comm);

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(origlp, lp, flags);
        kqueue_init(&lp->lwp_kqueue, destproc->p_fd);

        /*
         * Assign a TID to the lp.  Loop until the insert succeeds (returns
         * NULL).
         *
         * If we are in a vfork assign the same TID as the lwp that did the
         * vfork().  This way if the user program messes around with
         * pthread calls inside the vfork(), it will operate like an
         * extension of the (blocked) parent.  Also note that since the
         * address space is being shared, insofar as pthreads is concerned,
         * the code running in the vfork() is part of the original process.
         */
        if (flags & RFPPWAIT) {
                lp->lwp_tid = origlp->lwp_tid - 1;
        } else {
                lp->lwp_tid = destproc->p_lasttid;
        }

        /*
         * Leave 2 bits open so the pthreads library can optimize locks
         * by combining the TID with a few lock-related flags.
         */
        do {
                if (lp->lwp_tid == 0 || lp->lwp_tid == 0x3FFFFFFF)
                        lp->lwp_tid = 1;
                else
                        ++lp->lwp_tid;
        } while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);

        destproc->p_lasttid = lp->lwp_tid;
        destproc->p_nthreads++;

        /*
         * This flag is set and never cleared.
         * It means that the process was threaded at some point.  Used to
         * improve exit performance.
         */
        pmap_maybethreaded(&destproc->p_vmspace->vm_pmap);
        destproc->p_flags |= P_MAYBETHREADED;

        return (lp);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * however, first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
        struct forklist *ep;

#ifdef INVARIANTS
        /* let the programmer know if he's been stupid */
        if (rm_at_fork(function)) {
                kprintf("WARNING: fork callout entry (%p) already present\n",
                        function);
        }
#endif
        ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
        ep->function = function;
        TAILQ_INSERT_TAIL(&fork_list, ep, next);
        return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
        struct forklist *ep;

        TAILQ_FOREACH(ep, &fork_list, next) {
                if (ep->function == function) {
                        TAILQ_REMOVE(&fork_list, ep, next);
                        kfree(ep, M_ATFORK);
                        return(1);
                }
        }
        return (0);
}

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 *
 * p2 is held by the caller.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
        struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);
        int pflags;

        /*
         * Move from SIDL to RUN queue, and activate the process's thread.
         * Activation of the thread effectively makes the process "a"
         * current process, so we do not setrunqueue().
         *
         * YYY setrunqueue works here but we should clean up the trampoline
         * code so we just schedule the LWKT thread and let the trampoline
         * deal with the userland scheduler on return to userland.
         */
        KASSERT(p2->p_stat == SIDL,
                ("cannot start forked process, bad status: %p", p2));
        p2->p_usched->resetpriority(lp2);
        crit_enter();
        p2->p_stat = SACTIVE;
        lp2->lwp_stat = LSRUN;
        p2->p_usched->setrunqueue(lp2);
        crit_exit();

        /*
         * Now can be swapped.
         */
        PRELE(lp1->lwp_proc);

        /*
         * Preserve synchronization semantics of vfork.  P_PPWAIT is set in
         * the child until it has retired the parent's resources.  The parent
         * must wait for the flag to be cleared by the child.
         *
         * Interlock the flag/tsleep with atomic ops to avoid unnecessary
         * p_token conflicts.
         *
         * XXX Is this use of an atomic op on a field that is not normally
         *     manipulated with atomic ops ok?
         */
        while ((pflags = p2->p_flags) & P_PPWAIT) {
                cpu_ccfence();
                tsleep_interlock(lp1->lwp_proc, 0);
                if (atomic_cmpset_int(&p2->p_flags, pflags, pflags))
                        tsleep(lp1->lwp_proc, PINTERLOCKED, "ppwait", 0);
        }
}

/*
 * procctl (idtype_t idtype, id_t id, int cmd, void *arg)
 */
int
sys_procctl(struct procctl_args *uap)
{
        struct proc *p = curproc;
        struct proc *p2;
        struct sysreaper *reap;
        union reaper_info udata;
        int error;

        if (uap->idtype != P_PID || uap->id != (id_t)p->p_pid)
                return EINVAL;

        switch(uap->cmd) {
        case PROC_REAP_ACQUIRE:
                lwkt_gettoken(&p->p_token);
                reap = kmalloc(sizeof(*reap), M_REAPER, M_WAITOK|M_ZERO);
                if (p->p_reaper == NULL || p->p_reaper->p != p) {
                        reaper_init(p, reap);
                        error = 0;
                } else {
                        kfree(reap, M_REAPER);
                        error = EALREADY;
                }
                lwkt_reltoken(&p->p_token);
                break;
        case PROC_REAP_RELEASE:
                lwkt_gettoken(&p->p_token);
release_again:
                reap = p->p_reaper;
                KKASSERT(reap != NULL);
                if (reap->p == p) {
                        reaper_hold(reap);      /* in case of thread race */
                        lockmgr(&reap->lock, LK_EXCLUSIVE);
                        if (reap->p != p) {
                                lockmgr(&reap->lock, LK_RELEASE);
                                reaper_drop(reap);
                                goto release_again;
                        }
                        reap->p = NULL;
                        p->p_reaper = reap->parent;
                        if (p->p_reaper)
                                reaper_hold(p->p_reaper);
                        lockmgr(&reap->lock, LK_RELEASE);
                        reaper_drop(reap);      /* our ref */
                        reaper_drop(reap);      /* old p_reaper ref */
                        error = 0;
                } else {
                        error = ENOTCONN;
                }
                lwkt_reltoken(&p->p_token);
                break;
        case PROC_REAP_STATUS:
                bzero(&udata, sizeof(udata));
                lwkt_gettoken_shared(&p->p_token);
                if ((reap = p->p_reaper) != NULL && reap->p == p) {
                        udata.status.flags = reap->flags;
                        udata.status.refs = reap->refs - 1; /* minus ours */
                }
                p2 = LIST_FIRST(&p->p_children);
                udata.status.pid_head = p2 ? p2->p_pid : -1;
                lwkt_reltoken(&p->p_token);

                if (uap->data) {
                        error = copyout(&udata, uap->data,
                                        sizeof(udata.status));
                } else {
                        error = 0;
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        return error;
}

/*
 * Bump ref on reaper, preventing destruction
 */
void
reaper_hold(struct sysreaper *reap)
{
        KKASSERT(reap->refs > 0);
        refcount_acquire(&reap->refs);
}

/*
 * Drop ref on reaper, destroy the structure on the 1->0
 * transition and loop on the parent.
 */
void
reaper_drop(struct sysreaper *next)
{
        struct sysreaper *reap;

        while ((reap = next) != NULL) {
                if (refcount_release(&reap->refs)) {
                        next = reap->parent;
                        KKASSERT(reap->p == NULL);
                        lockmgr(&reaper_lock, LK_EXCLUSIVE);
                        reap->parent = NULL;
                        kfree(reap, M_REAPER);
                        lockmgr(&reaper_lock, LK_RELEASE);
                } else {
                        next = NULL;
                }
        }
}

/*
 * Initialize a static or newly allocated reaper structure
 */
void
reaper_init(struct proc *p, struct sysreaper *reap)
{
        reap->parent = p->p_reaper;
        reap->p = p;
        if (p == initproc) {
                reap->flags = REAPER_STAT_OWNED | REAPER_STAT_REALINIT;
                reap->refs = 2;
        } else {
                reap->flags = REAPER_STAT_OWNED;
                reap->refs = 1;
        }
        lockinit(&reap->lock, "subrp", 0, 0);
        cpu_sfence();
        p->p_reaper = reap;
}
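
/*
 * Editorial sketch, not part of the original source: how a userland
 * process would drive the reaper commands implemented by sys_procctl()
 * above.  procctl(2) is assumed to be declared in <sys/procctl.h>
 * together with union reaper_info and the PROC_REAP_* constants, and
 * P_PID in <sys/wait.h>; the kernel code above only accepts idtype P_PID
 * with the caller's own pid.  Never compiled into the kernel (#if 0).
 */
#if 0
#include <sys/types.h>
#include <sys/procctl.h>        /* PROC_REAP_*, union reaper_info (assumed) */
#include <sys/wait.h>           /* P_PID (assumed location) */
#include <unistd.h>
#include <err.h>

static void
reaper_demo(void)
{
        union reaper_info info;

        /* Become a reaper: orphaned descendants reparent to us, not init. */
        if (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL) < 0)
                err(1, "PROC_REAP_ACQUIRE");

        /* ... fork/exec a service tree here ... */

        /* status.pid_head is our first direct child, or -1 if none. */
        if (procctl(P_PID, getpid(), PROC_REAP_STATUS, &info) == 0 &&
            info.status.pid_head != -1) {
                /* walk children starting from info.status.pid_head */
        }

        /* Give the reaper role back to our own reaper (or init). */
        if (procctl(P_PID, getpid(), PROC_REAP_RELEASE, NULL) < 0)
                err(1, "PROC_REAP_RELEASE");
}
#endif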

/*
 * Called with p->p_token held during exit.
 *
 * This is a bit simpler than RELEASE because there are no threads remaining
 * to race.  We only release if we own the reaper, the exit code will handle
 * the final p_reaper release.
 */
struct sysreaper *
reaper_exit(struct proc *p)
{
        struct sysreaper *reap;

        /*
         * Release acquired reaper
         */
        if ((reap = p->p_reaper) != NULL && reap->p == p) {
                lockmgr(&reap->lock, LK_EXCLUSIVE);
                p->p_reaper = reap->parent;
                if (p->p_reaper)
                        reaper_hold(p->p_reaper);
                reap->p = NULL;
                lockmgr(&reap->lock, LK_RELEASE);
                reaper_drop(reap);
        }

        /*
         * Return and clear reaper (caller is holding p_token for us)
         * (reap->p does not equal p).  Caller must drop it.
         */
        if ((reap = p->p_reaper) != NULL) {
                p->p_reaper = NULL;
        }
        return reap;
}

/*
 * Return a held (PHOLD) process representing the reaper for process (p).
 * NULL should not normally be returned.  Caller should PRELE() the returned
 * reaper process when finished.
 *
 * Remove dead internal nodes while we are at it.
 *
 * Process (p)'s token must be held on call.
 * The returned process's token is NOT acquired by this routine.
 */
struct proc *
reaper_get(struct sysreaper *reap)
{
        struct sysreaper *next;
        struct proc *reproc;

        if (reap == NULL)
                return NULL;

        /*
         * Extra hold for loop
         */
        reaper_hold(reap);

        while (reap) {
                lockmgr(&reap->lock, LK_SHARED);
                if (reap->p) {
                        /*
                         * Probable reaper
                         */
                        if (reap->p) {
                                reproc = reap->p;
                                PHOLD(reproc);
                                lockmgr(&reap->lock, LK_RELEASE);
                                reaper_drop(reap);
                                return reproc;
                        }

                        /*
                         * Raced, try again
                         */
                        lockmgr(&reap->lock, LK_RELEASE);
                        continue;
                }

                /*
                 * Traverse upwards in the reaper topology, destroy
                 * dead internal nodes when possible.
                 *
                 * NOTE: Our ref on next means that a dead node should
                 *       have 2 (ours and reap->parent's).
                 */
                next = reap->parent;
                while (next) {
                        reaper_hold(next);
                        if (next->refs == 2 && next->p == NULL) {
                                lockmgr(&reap->lock, LK_RELEASE);
                                lockmgr(&reap->lock, LK_EXCLUSIVE);
                                if (next->refs == 2 &&
                                    reap->parent == next &&
                                    next->p == NULL) {
                                        /*
                                         * reap->parent inherits ref from next.
                                         */
                                        reap->parent = next->parent;
                                        next->parent = NULL;
                                        reaper_drop(next);      /* ours */
                                        reaper_drop(next);      /* old parent */
                                        next = reap->parent;
                                        continue;       /* possible chain */
                                }
                        }
                        break;
                }
                lockmgr(&reap->lock, LK_RELEASE);
                reaper_drop(reap);
                reap = next;
        }
        return NULL;
}

/*
 * Test that the sender is allowed to send a signal to the target.
 * The sender process is assumed to have a stable reaper.  The
 * target can be e.g. from a scan callback.
 *
 * Target cannot be the reaper process itself unless reaper_ok is specified,
 * or sender == target.
 */
int
reaper_sigtest(struct proc *sender, struct proc *target, int reaper_ok)
{
        struct sysreaper *sreap;
        struct sysreaper *reap;
        int r;

        sreap = sender->p_reaper;
        if (sreap == NULL)
                return 1;

        if (sreap == target->p_reaper) {
                if (sreap->p == target && sreap->p != sender && reaper_ok == 0)
                        return 0;
                return 1;
        }
        lockmgr(&reaper_lock, LK_SHARED);
        r = 0;
        for (reap = target->p_reaper; reap; reap = reap->parent) {
                if (sreap == reap) {
                        if (sreap->p != target || reaper_ok)
                                r = 1;
                        break;
                }
        }
        lockmgr(&reaper_lock, LK_RELEASE);

        return r;
}
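
/*
 * Editorial sketch, not part of the original source: registering a fork
 * callout with at_fork()/rm_at_fork() above.  The forklist_fn signature is
 * inferred from the call site in fork1(), which invokes each entry as
 * (*function)(p1, p2, flags) and ignores any return value; the hook and
 * load/unload routine names are hypothetical.  Never compiled (#if 0).
 */
#if 0
static void
example_fork_hook(struct proc *p1, struct proc *p2, int flags)
{
        /*
         * Called after p2 is fully constructed but before the caller makes
         * it runnable; keep the work short and avoid blocking for long.
         */
}

static void
example_hook_load(void)
{
        at_fork(example_fork_hook);
}

static void
example_hook_unload(void)
{
        rm_at_fork(example_fork_hook);
}
#endif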