1 /* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94 35 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $ 36 */ 37 38 #include "opt_ktrace.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/sysproto.h> 43 #include <sys/filedesc.h> 44 #include <sys/kernel.h> 45 #include <sys/sysctl.h> 46 #include <sys/malloc.h> 47 #include <sys/proc.h> 48 #include <sys/resourcevar.h> 49 #include <sys/vnode.h> 50 #include <sys/acct.h> 51 #include <sys/ktrace.h> 52 #include <sys/unistd.h> 53 #include <sys/jail.h> 54 #include <sys/lwp.h> 55 56 #include <vm/vm.h> 57 #include <sys/lock.h> 58 #include <vm/pmap.h> 59 #include <vm/vm_map.h> 60 #include <vm/vm_extern.h> 61 62 #include <sys/vmmeter.h> 63 #include <sys/refcount.h> 64 #include <sys/thread2.h> 65 #include <sys/signal2.h> 66 #include <sys/spinlock2.h> 67 68 #include <sys/dsched.h> 69 70 static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback"); 71 static MALLOC_DEFINE(M_REAPER, "reaper", "process reapers"); 72 73 /* 74 * These are the stuctures used to create a callout list for things to do 75 * when forking a process 76 */ 77 struct forklist { 78 forklist_fn function; 79 TAILQ_ENTRY(forklist) next; 80 }; 81 82 TAILQ_HEAD(forklist_head, forklist); 83 static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list); 84 85 static struct lwp *lwp_fork1(struct lwp *, struct proc *, int flags, 86 const cpumask_t *mask); 87 static void lwp_fork2(struct lwp *lp1, struct proc *destproc, 88 struct lwp *lp2, int flags); 89 static int lwp_create1(struct lwp_params *params, 90 const cpumask_t *mask); 91 static struct lock reaper_lock = LOCK_INITIALIZER("reapgl", 0, 0); 92 93 int forksleep; /* Place for fork1() to sleep on. */ 94 95 /* 96 * Red-Black tree support for LWPs 97 */ 98 99 static int 100 rb_lwp_compare(struct lwp *lp1, struct lwp *lp2) 101 { 102 if (lp1->lwp_tid < lp2->lwp_tid) 103 return(-1); 104 if (lp1->lwp_tid > lp2->lwp_tid) 105 return(1); 106 return(0); 107 } 108 109 RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid); 110 111 /* 112 * When forking, memory underpinning umtx-supported mutexes may be set 113 * COW causing the physical address to change. We must wakeup any threads 114 * blocked on the physical address to allow them to re-resolve their VM. 115 * 116 * (caller is holding p->p_token) 117 */ 118 static void 119 wake_umtx_threads(struct proc *p1) 120 { 121 struct lwp *lp; 122 struct thread *td; 123 124 RB_FOREACH(lp, lwp_rb_tree, &p1->p_lwp_tree) { 125 td = lp->lwp_thread; 126 if (td && (td->td_flags & TDF_TSLEEPQ) && 127 (td->td_wdomain & PDOMAIN_MASK) == PDOMAIN_UMTX) { 128 wakeup_domain(td->td_wchan, PDOMAIN_UMTX); 129 } 130 } 131 } 132 133 /* 134 * fork() system call 135 */ 136 int 137 sys_fork(struct fork_args *uap) 138 { 139 struct lwp *lp = curthread->td_lwp; 140 struct proc *p2; 141 int error; 142 143 error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2); 144 if (error == 0) { 145 PHOLD(p2); 146 start_forked_proc(lp, p2); 147 uap->sysmsg_fds[0] = p2->p_pid; 148 uap->sysmsg_fds[1] = 0; 149 PRELE(p2); 150 } 151 return error; 152 } 153 154 /* 155 * vfork() system call 156 */ 157 int 158 sys_vfork(struct vfork_args *uap) 159 { 160 struct lwp *lp = curthread->td_lwp; 161 struct proc *p2; 162 int error; 163 164 error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2); 165 if (error == 0) { 166 PHOLD(p2); 167 start_forked_proc(lp, p2); 168 uap->sysmsg_fds[0] = p2->p_pid; 169 uap->sysmsg_fds[1] = 0; 170 PRELE(p2); 171 } 172 return error; 173 } 174 175 /* 176 * Handle rforks. An rfork may (1) operate on the current process without 177 * creating a new, (2) create a new process that shared the current process's 178 * vmspace, signals, and/or descriptors, or (3) create a new process that does 179 * not share these things (normal fork). 180 * 181 * Note that we only call start_forked_proc() if a new process is actually 182 * created. 183 * 184 * rfork { int flags } 185 */ 186 int 187 sys_rfork(struct rfork_args *uap) 188 { 189 struct lwp *lp = curthread->td_lwp; 190 struct proc *p2; 191 int error; 192 193 if ((uap->flags & RFKERNELONLY) != 0) 194 return (EINVAL); 195 196 error = fork1(lp, uap->flags | RFPGLOCK, &p2); 197 if (error == 0) { 198 if (p2) { 199 PHOLD(p2); 200 start_forked_proc(lp, p2); 201 uap->sysmsg_fds[0] = p2->p_pid; 202 uap->sysmsg_fds[1] = 0; 203 PRELE(p2); 204 } else { 205 uap->sysmsg_fds[0] = 0; 206 uap->sysmsg_fds[1] = 0; 207 } 208 } 209 return error; 210 } 211 212 static int 213 lwp_create1(struct lwp_params *uprm, const cpumask_t *umask) 214 { 215 struct proc *p = curproc; 216 struct lwp *lp; 217 struct lwp_params params; 218 cpumask_t *mask = NULL, mask0; 219 int error; 220 221 error = copyin(uprm, ¶ms, sizeof(params)); 222 if (error) 223 goto fail2; 224 225 if (umask != NULL) { 226 error = copyin(umask, &mask0, sizeof(mask0)); 227 if (error) 228 goto fail2; 229 CPUMASK_ANDMASK(mask0, smp_active_mask); 230 if (CPUMASK_TESTNZERO(mask0)) 231 mask = &mask0; 232 } 233 234 lwkt_gettoken(&p->p_token); 235 plimit_lwp_fork(p); /* force exclusive access */ 236 lp = lwp_fork1(curthread->td_lwp, p, RFPROC | RFMEM, mask); 237 lwp_fork2(curthread->td_lwp, p, lp, RFPROC | RFMEM); 238 error = cpu_prepare_lwp(lp, ¶ms); 239 if (error) 240 goto fail; 241 if (params.lwp_tid1 != NULL && 242 (error = copyout(&lp->lwp_tid, params.lwp_tid1, sizeof(lp->lwp_tid)))) 243 goto fail; 244 if (params.lwp_tid2 != NULL && 245 (error = copyout(&lp->lwp_tid, params.lwp_tid2, sizeof(lp->lwp_tid)))) 246 goto fail; 247 248 /* 249 * Now schedule the new lwp. 250 */ 251 p->p_usched->resetpriority(lp); 252 crit_enter(); 253 lp->lwp_stat = LSRUN; 254 p->p_usched->setrunqueue(lp); 255 crit_exit(); 256 lwkt_reltoken(&p->p_token); 257 258 return (0); 259 260 fail: 261 /* 262 * Make sure no one is using this lwp, before it is removed from 263 * the tree. If we didn't wait it here, lwp tree iteration with 264 * blocking operation would be broken. 265 */ 266 while (lp->lwp_lock > 0) 267 tsleep(lp, 0, "lwpfail", 1); 268 lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp); 269 --p->p_nthreads; 270 /* lwp_dispose expects an exited lwp, and a held proc */ 271 atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT); 272 lp->lwp_thread->td_flags |= TDF_EXITING; 273 lwkt_remove_tdallq(lp->lwp_thread); 274 PHOLD(p); 275 biosched_done(lp->lwp_thread); 276 dsched_exit_thread(lp->lwp_thread); 277 lwp_dispose(lp); 278 lwkt_reltoken(&p->p_token); 279 fail2: 280 return (error); 281 } 282 283 /* 284 * Low level thread create used by pthreads. 285 */ 286 int 287 sys_lwp_create(struct lwp_create_args *uap) 288 { 289 290 return (lwp_create1(uap->params, NULL)); 291 } 292 293 int 294 sys_lwp_create2(struct lwp_create2_args *uap) 295 { 296 297 return (lwp_create1(uap->params, uap->mask)); 298 } 299 300 int nprocs = 1; /* process 0 */ 301 302 int 303 fork1(struct lwp *lp1, int flags, struct proc **procp) 304 { 305 struct proc *p1 = lp1->lwp_proc; 306 struct proc *p2; 307 struct proc *pptr; 308 struct pgrp *p1grp; 309 struct pgrp *plkgrp; 310 struct lwp *lp2; 311 struct sysreaper *reap; 312 uid_t uid; 313 int ok, error; 314 static int curfail = 0; 315 static struct timeval lastfail; 316 struct forklist *ep; 317 struct filedesc_to_leader *fdtol; 318 319 if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG)) 320 return (EINVAL); 321 322 lwkt_gettoken(&p1->p_token); 323 plkgrp = NULL; 324 p2 = NULL; 325 326 /* 327 * Here we don't create a new process, but we divorce 328 * certain parts of a process from itself. 329 */ 330 if ((flags & RFPROC) == 0) { 331 /* 332 * This kind of stunt does not work anymore if 333 * there are native threads (lwps) running 334 */ 335 if (p1->p_nthreads != 1) { 336 error = EINVAL; 337 goto done; 338 } 339 340 vm_fork(p1, 0, flags); 341 if ((flags & RFMEM) == 0) 342 wake_umtx_threads(p1); 343 344 /* 345 * Close all file descriptors. 346 */ 347 if (flags & RFCFDG) { 348 struct filedesc *fdtmp; 349 fdtmp = fdinit(p1); 350 fdfree(p1, fdtmp); 351 } 352 353 /* 354 * Unshare file descriptors (from parent.) 355 */ 356 if (flags & RFFDG) { 357 if (p1->p_fd->fd_refcnt > 1) { 358 struct filedesc *newfd; 359 error = fdcopy(p1, &newfd); 360 if (error != 0) { 361 error = ENOMEM; 362 goto done; 363 } 364 fdfree(p1, newfd); 365 } 366 } 367 *procp = NULL; 368 error = 0; 369 goto done; 370 } 371 372 /* 373 * Interlock against process group signal delivery. If signals 374 * are pending after the interlock is obtained we have to restart 375 * the system call to process the signals. If we don't the child 376 * can miss a pgsignal (such as ^C) sent during the fork. 377 * 378 * We can't use CURSIG() here because it will process any STOPs 379 * and cause the process group lock to be held indefinitely. If 380 * a STOP occurs, the fork will be restarted after the CONT. 381 */ 382 p1grp = p1->p_pgrp; 383 if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) { 384 pgref(plkgrp); 385 lockmgr(&plkgrp->pg_lock, LK_SHARED); 386 if (CURSIG_NOBLOCK(lp1)) { 387 error = ERESTART; 388 goto done; 389 } 390 } 391 392 /* 393 * Although process entries are dynamically created, we still keep 394 * a global limit on the maximum number we will create. Don't allow 395 * a nonprivileged user to use the last ten processes; don't let root 396 * exceed the limit. The variable nprocs is the current number of 397 * processes, maxproc is the limit. 398 */ 399 uid = lp1->lwp_thread->td_ucred->cr_ruid; 400 if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) { 401 if (ppsratecheck(&lastfail, &curfail, 1)) 402 kprintf("maxproc limit exceeded by uid %d, please " 403 "see tuning(7) and login.conf(5).\n", uid); 404 tsleep(&forksleep, 0, "fork", hz / 2); 405 error = EAGAIN; 406 goto done; 407 } 408 409 /* 410 * Increment the nprocs resource before blocking can occur. There 411 * are hard-limits as to the number of processes that can run. 412 */ 413 atomic_add_int(&nprocs, 1); 414 415 /* 416 * Increment the count of procs running with this uid. This also 417 * applies to root. 418 */ 419 ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1, 420 plimit_getadjvalue(RLIMIT_NPROC)); 421 if (!ok) { 422 /* 423 * Back out the process count 424 */ 425 atomic_add_int(&nprocs, -1); 426 if (ppsratecheck(&lastfail, &curfail, 1)) { 427 kprintf("maxproc limit of %jd " 428 "exceeded by \"%s\" uid %d, " 429 "please see tuning(7) and login.conf(5).\n", 430 plimit_getadjvalue(RLIMIT_NPROC), 431 p1->p_comm, 432 uid); 433 } 434 tsleep(&forksleep, 0, "fork", hz / 2); 435 error = EAGAIN; 436 goto done; 437 } 438 439 /* 440 * Allocate a new process, don't get fancy: zero the structure. 441 */ 442 p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO); 443 444 /* 445 * Core initialization. SIDL is a safety state that protects the 446 * partially initialized process once it starts getting hooked 447 * into system structures and becomes addressable. 448 * 449 * We must be sure to acquire p2->p_token as well, we must hold it 450 * once the process is on the allproc list to avoid things such 451 * as competing modifications to p_flags. 452 */ 453 mycpu->gd_forkid += ncpus; 454 p2->p_forkid = mycpu->gd_forkid + mycpu->gd_cpuid; 455 p2->p_lasttid = 0; /* first tid will be 1 */ 456 p2->p_stat = SIDL; 457 458 /* 459 * NOTE: Process 0 will not have a reaper, but process 1 (init) and 460 * all other processes always will. 461 */ 462 if ((reap = p1->p_reaper) != NULL) { 463 reaper_hold(reap); 464 p2->p_reaper = reap; 465 } else { 466 p2->p_reaper = NULL; 467 } 468 469 RB_INIT(&p2->p_lwp_tree); 470 spin_init(&p2->p_spin, "procfork1"); 471 lwkt_token_init(&p2->p_token, "proc"); 472 lwkt_gettoken(&p2->p_token); 473 p2->p_uidpcpu = kmalloc(sizeof(*p2->p_uidpcpu) * ncpus, 474 M_SUBPROC, M_WAITOK | M_ZERO); 475 476 /* 477 * Setup linkage for kernel based threading XXX lwp. Also add the 478 * process to the allproclist. 479 * 480 * The process structure is addressable after this point. 481 */ 482 if (flags & RFTHREAD) { 483 p2->p_peers = p1->p_peers; 484 p1->p_peers = p2; 485 p2->p_leader = p1->p_leader; 486 } else { 487 p2->p_leader = p2; 488 } 489 proc_add_allproc(p2); 490 491 /* 492 * Initialize the section which is copied verbatim from the parent. 493 */ 494 bcopy(&p1->p_startcopy, &p2->p_startcopy, 495 ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy)); 496 497 /* 498 * Duplicate sub-structures as needed. Increase reference counts 499 * on shared objects. 500 * 501 * NOTE: because we are now on the allproc list it is possible for 502 * other consumers to gain temporary references to p2 503 * (p2->p_lock can change). 504 */ 505 if (p1->p_flags & P_PROFIL) 506 startprofclock(p2); 507 p2->p_ucred = crhold(lp1->lwp_thread->td_ucred); 508 509 if (jailed(p2->p_ucred)) 510 p2->p_flags |= P_JAILED; 511 512 if (p2->p_args) 513 refcount_acquire(&p2->p_args->ar_ref); 514 515 p2->p_usched = p1->p_usched; 516 /* XXX: verify copy of the secondary iosched stuff */ 517 dsched_enter_proc(p2); 518 519 if (flags & RFSIGSHARE) { 520 p2->p_sigacts = p1->p_sigacts; 521 refcount_acquire(&p2->p_sigacts->ps_refcnt); 522 } else { 523 p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts), 524 M_SUBPROC, M_WAITOK); 525 bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts)); 526 refcount_init(&p2->p_sigacts->ps_refcnt, 1); 527 } 528 if (flags & RFLINUXTHPN) 529 p2->p_sigparent = SIGUSR1; 530 else 531 p2->p_sigparent = SIGCHLD; 532 533 /* bump references to the text vnode (for procfs) */ 534 p2->p_textvp = p1->p_textvp; 535 if (p2->p_textvp) 536 vref(p2->p_textvp); 537 538 /* copy namecache handle to the text file */ 539 if (p1->p_textnch.mount) 540 cache_copy(&p1->p_textnch, &p2->p_textnch); 541 542 /* 543 * Handle file descriptors 544 */ 545 if (flags & RFCFDG) { 546 p2->p_fd = fdinit(p1); 547 fdtol = NULL; 548 } else if (flags & RFFDG) { 549 error = fdcopy(p1, &p2->p_fd); 550 if (error != 0) { 551 error = ENOMEM; 552 goto done; 553 } 554 fdtol = NULL; 555 } else { 556 p2->p_fd = fdshare(p1); 557 if (p1->p_fdtol == NULL) { 558 p1->p_fdtol = filedesc_to_leader_alloc(NULL, 559 p1->p_leader); 560 } 561 if ((flags & RFTHREAD) != 0) { 562 /* 563 * Shared file descriptor table and 564 * shared process leaders. 565 */ 566 fdtol = p1->p_fdtol; 567 fdtol->fdl_refcount++; 568 } else { 569 /* 570 * Shared file descriptor table, and 571 * different process leaders 572 */ 573 fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2); 574 } 575 } 576 p2->p_fdtol = fdtol; 577 p2->p_limit = plimit_fork(p1); 578 579 /* 580 * Adjust depth for resource downscaling 581 */ 582 if ((p2->p_depth & 31) != 31) 583 ++p2->p_depth; 584 585 /* 586 * Preserve some more flags in subprocess. P_PROFIL has already 587 * been preserved. 588 */ 589 p2->p_flags |= p1->p_flags & P_SUGID; 590 if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT)) 591 p2->p_flags |= P_CONTROLT; 592 if (flags & RFPPWAIT) { 593 p2->p_flags |= P_PPWAIT; 594 if (p1->p_upmap) 595 atomic_add_int(&p1->p_upmap->invfork, 1); 596 } 597 598 /* 599 * Inherit the virtual kernel structure (allows a virtual kernel 600 * to fork to simulate multiple cpus). 601 */ 602 if (p1->p_vkernel) 603 vkernel_inherit(p1, p2); 604 605 /* 606 * Once we are on a pglist we may receive signals. XXX we might 607 * race a ^C being sent to the process group by not receiving it 608 * at all prior to this line. 609 */ 610 pgref(p1grp); 611 lwkt_gettoken(&p1grp->pg_token); 612 LIST_INSERT_AFTER(p1, p2, p_pglist); 613 lwkt_reltoken(&p1grp->pg_token); 614 615 /* 616 * Attach the new process to its parent. 617 * 618 * If RFNOWAIT is set, the newly created process becomes a child 619 * of the reaper (typically init). This effectively disassociates 620 * the child from the parent. 621 * 622 * Temporarily hold pptr for the RFNOWAIT case to avoid ripouts. 623 */ 624 if (flags & RFNOWAIT) { 625 pptr = reaper_get(reap); 626 if (pptr == NULL) { 627 pptr = initproc; 628 PHOLD(pptr); 629 } 630 } else { 631 pptr = p1; 632 } 633 p2->p_pptr = pptr; 634 p2->p_ppid = pptr->p_pid; 635 LIST_INIT(&p2->p_children); 636 637 lwkt_gettoken(&pptr->p_token); 638 LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling); 639 lwkt_reltoken(&pptr->p_token); 640 641 if (flags & RFNOWAIT) 642 PRELE(pptr); 643 644 varsymset_init(&p2->p_varsymset, &p1->p_varsymset); 645 callout_init_mp(&p2->p_ithandle); 646 647 #ifdef KTRACE 648 /* 649 * Copy traceflag and tracefile if enabled. If not inherited, 650 * these were zeroed above but we still could have a trace race 651 * so make sure p2's p_tracenode is NULL. 652 */ 653 if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) { 654 p2->p_traceflag = p1->p_traceflag; 655 p2->p_tracenode = ktrinherit(p1->p_tracenode); 656 } 657 #endif 658 659 /* 660 * This begins the section where we must prevent the parent 661 * from being swapped. 662 * 663 * Gets PRELE'd in the caller in start_forked_proc(). 664 */ 665 PHOLD(p1); 666 667 lp2 = lwp_fork1(lp1, p2, flags, NULL); 668 vm_fork(p1, p2, flags); 669 if ((flags & RFMEM) == 0) 670 wake_umtx_threads(p1); 671 672 /* 673 * Create the first lwp associated with the new proc. 674 * It will return via a different execution path later, directly 675 * into userland, after it was put on the runq by 676 * start_forked_proc(). 677 */ 678 lwp_fork2(lp1, p2, lp2, flags); 679 680 if (flags == (RFFDG | RFPROC | RFPGLOCK)) { 681 mycpu->gd_cnt.v_forks++; 682 mycpu->gd_cnt.v_forkpages += btoc(p2->p_vmspace->vm_dsize) + 683 btoc(p2->p_vmspace->vm_ssize); 684 } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) { 685 mycpu->gd_cnt.v_vforks++; 686 mycpu->gd_cnt.v_vforkpages += btoc(p2->p_vmspace->vm_dsize) + 687 btoc(p2->p_vmspace->vm_ssize); 688 } else if (p1 == &proc0) { 689 mycpu->gd_cnt.v_kthreads++; 690 mycpu->gd_cnt.v_kthreadpages += btoc(p2->p_vmspace->vm_dsize) + 691 btoc(p2->p_vmspace->vm_ssize); 692 } else { 693 mycpu->gd_cnt.v_rforks++; 694 mycpu->gd_cnt.v_rforkpages += btoc(p2->p_vmspace->vm_dsize) + 695 btoc(p2->p_vmspace->vm_ssize); 696 } 697 698 /* 699 * Both processes are set up, now check if any loadable modules want 700 * to adjust anything. 701 * What if they have an error? XXX 702 */ 703 TAILQ_FOREACH(ep, &fork_list, next) { 704 (*ep->function)(p1, p2, flags); 705 } 706 707 /* 708 * Set the start time. Note that the process is not runnable. The 709 * caller is responsible for making it runnable. 710 */ 711 microtime(&p2->p_start); 712 p2->p_acflag = AFORK; 713 714 /* 715 * tell any interested parties about the new process 716 */ 717 KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid); 718 719 /* 720 * Return child proc pointer to parent. 721 */ 722 *procp = p2; 723 error = 0; 724 done: 725 if (p2) 726 lwkt_reltoken(&p2->p_token); 727 lwkt_reltoken(&p1->p_token); 728 if (plkgrp) { 729 lockmgr(&plkgrp->pg_lock, LK_RELEASE); 730 pgrel(plkgrp); 731 } 732 return (error); 733 } 734 735 static struct lwp * 736 lwp_fork1(struct lwp *lp1, struct proc *destproc, int flags, 737 const cpumask_t *mask) 738 { 739 struct lwp *lp2; 740 741 lp2 = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO); 742 lp2->lwp_proc = destproc; 743 lp2->lwp_stat = LSRUN; 744 bcopy(&lp1->lwp_startcopy, &lp2->lwp_startcopy, 745 (unsigned) ((caddr_t)&lp2->lwp_endcopy - 746 (caddr_t)&lp2->lwp_startcopy)); 747 if (mask != NULL) 748 lp2->lwp_cpumask = *mask; 749 750 lwkt_token_init(&lp2->lwp_token, "lwp_token"); 751 TAILQ_INIT(&lp2->lwp_lpmap_backing_list); 752 spin_init(&lp2->lwp_spin, "lwptoken"); 753 754 /* 755 * Use the same TID for the first thread in the new process after 756 * a fork or vfork. This is needed to keep pthreads and /dev/lpmap 757 * sane. In particular a consequence of implementing the per-thread 758 * /dev/lpmap map code makes this mandatory. 759 * 760 * NOTE: exec*() will reset the TID to 1 to keep things sane in that 761 * department too. 762 */ 763 lp2->lwp_tid = lp1->lwp_tid - 1; 764 765 /* 766 * Leave 2 bits open so the pthreads library can optimize locks 767 * by combining the TID with a few LOck-related flags. 768 */ 769 do { 770 if (lp2->lwp_tid == 0 || lp2->lwp_tid == 0x3FFFFFFF) 771 lp2->lwp_tid = 1; 772 else 773 ++lp2->lwp_tid; 774 } while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp2) != NULL); 775 776 destproc->p_lasttid = lp2->lwp_tid; 777 destproc->p_nthreads++; 778 779 return lp2; 780 } 781 782 static void 783 lwp_fork2(struct lwp *lp1, struct proc *destproc, struct lwp *lp2, int flags) 784 { 785 globaldata_t gd = mycpu; 786 struct thread *td2; 787 788 lp2->lwp_vmspace = destproc->p_vmspace; 789 790 /* 791 * Reset the sigaltstack if memory is shared, otherwise inherit 792 * it. 793 */ 794 if (flags & RFMEM) { 795 lp2->lwp_sigstk.ss_flags = SS_DISABLE; 796 lp2->lwp_sigstk.ss_size = 0; 797 lp2->lwp_sigstk.ss_sp = NULL; 798 lp2->lwp_flags &= ~LWP_ALTSTACK; 799 } else { 800 lp2->lwp_flags |= lp1->lwp_flags & LWP_ALTSTACK; 801 } 802 803 /* 804 * Set cpbase to the last timeout that occured (not the upcoming 805 * timeout). 806 * 807 * A critical section is required since a timer IPI can update 808 * scheduler specific data. 809 */ 810 crit_enter(); 811 lp2->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic; 812 destproc->p_usched->heuristic_forking(lp1, lp2); 813 crit_exit(); 814 CPUMASK_ANDMASK(lp2->lwp_cpumask, usched_mastermask); 815 816 /* 817 * Assign the thread to the current cpu to begin with so we 818 * can manipulate it. 819 */ 820 td2 = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0); 821 lp2->lwp_thread = td2; 822 td2->td_wakefromcpu = gd->gd_cpuid; 823 td2->td_ucred = crhold(destproc->p_ucred); 824 td2->td_proc = destproc; 825 td2->td_lwp = lp2; 826 td2->td_switch = cpu_heavy_switch; 827 #ifdef NO_LWKT_SPLIT_USERPRI 828 lwkt_setpri(td2, TDPRI_USER_NORM); 829 #else 830 lwkt_setpri(td2, TDPRI_KERN_USER); 831 #endif 832 lwkt_set_comm(td2, "%s", destproc->p_comm); 833 834 /* 835 * cpu_fork will copy and update the pcb, set up the kernel stack, 836 * and make the child ready to run. 837 */ 838 cpu_fork(lp1, lp2, flags); 839 kqueue_init(&lp2->lwp_kqueue, destproc->p_fd); 840 841 /* 842 * This flag is set and never cleared. It means that the process 843 * was threaded at some point. Used to improve exit performance. 844 */ 845 pmap_maybethreaded(&destproc->p_vmspace->vm_pmap); 846 destproc->p_flags |= P_MAYBETHREADED; 847 848 /* 849 * If the original lp had a lpmap and a non-zero blockallsigs 850 * count, give the lp for the forked process the same count. 851 * 852 * This makes the user code and expectations less confusing 853 * in terms of unwinding locks and also allows userland to start 854 * the forked process with signals blocked via the blockallsigs() 855 * mechanism if desired. 856 * 857 * XXX future - also inherit the lwp-specific process title ? 858 */ 859 if (lp1->lwp_lpmap && 860 (lp1->lwp_lpmap->blockallsigs & 0x7FFFFFFF)) { 861 lwp_usermap(lp2, 0); 862 if (lp2->lwp_lpmap) { 863 lp2->lwp_lpmap->blockallsigs = 864 lp1->lwp_lpmap->blockallsigs; 865 } 866 } 867 } 868 869 /* 870 * The next two functionms are general routines to handle adding/deleting 871 * items on the fork callout list. 872 * 873 * at_fork(): 874 * Take the arguments given and put them onto the fork callout list, 875 * However first make sure that it's not already there. 876 * Returns 0 on success or a standard error number. 877 */ 878 int 879 at_fork(forklist_fn function) 880 { 881 struct forklist *ep; 882 883 #ifdef INVARIANTS 884 /* let the programmer know if he's been stupid */ 885 if (rm_at_fork(function)) { 886 kprintf("WARNING: fork callout entry (%p) already present\n", 887 function); 888 } 889 #endif 890 ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO); 891 ep->function = function; 892 TAILQ_INSERT_TAIL(&fork_list, ep, next); 893 return (0); 894 } 895 896 /* 897 * Scan the exit callout list for the given item and remove it.. 898 * Returns the number of items removed (0 or 1) 899 */ 900 int 901 rm_at_fork(forklist_fn function) 902 { 903 struct forklist *ep; 904 905 TAILQ_FOREACH(ep, &fork_list, next) { 906 if (ep->function == function) { 907 TAILQ_REMOVE(&fork_list, ep, next); 908 kfree(ep, M_ATFORK); 909 return(1); 910 } 911 } 912 return (0); 913 } 914 915 /* 916 * Add a forked process to the run queue after any remaining setup, such 917 * as setting the fork handler, has been completed. 918 * 919 * p2 is held by the caller. 920 */ 921 void 922 start_forked_proc(struct lwp *lp1, struct proc *p2) 923 { 924 struct lwp *lp2 = ONLY_LWP_IN_PROC(p2); 925 int pflags; 926 927 /* 928 * Move from SIDL to RUN queue, and activate the process's thread. 929 * Activation of the thread effectively makes the process "a" 930 * current process, so we do not setrunqueue(). 931 * 932 * YYY setrunqueue works here but we should clean up the trampoline 933 * code so we just schedule the LWKT thread and let the trampoline 934 * deal with the userland scheduler on return to userland. 935 */ 936 KASSERT(p2->p_stat == SIDL, 937 ("cannot start forked process, bad status: %p", p2)); 938 p2->p_usched->resetpriority(lp2); 939 crit_enter(); 940 p2->p_stat = SACTIVE; 941 lp2->lwp_stat = LSRUN; 942 p2->p_usched->setrunqueue(lp2); 943 crit_exit(); 944 945 /* 946 * Now can be swapped. 947 */ 948 PRELE(lp1->lwp_proc); 949 950 /* 951 * Preserve synchronization semantics of vfork. P_PPWAIT is set in 952 * the child until it has retired the parent's resources. The parent 953 * must wait for the flag to be cleared by the child. 954 * 955 * Interlock the flag/tsleep with atomic ops to avoid unnecessary 956 * p_token conflicts. 957 * 958 * XXX Is this use of an atomic op on a field that is not normally 959 * manipulated with atomic ops ok? 960 */ 961 while ((pflags = p2->p_flags) & P_PPWAIT) { 962 cpu_ccfence(); 963 tsleep_interlock(lp1->lwp_proc, 0); 964 if (atomic_cmpset_int(&p2->p_flags, pflags, pflags)) 965 tsleep(lp1->lwp_proc, PINTERLOCKED, "ppwait", 0); 966 } 967 } 968 969 /* 970 * procctl (idtype_t idtype, id_t id, int cmd, void *arg) 971 */ 972 int 973 sys_procctl(struct procctl_args *uap) 974 { 975 struct proc *p = curproc; 976 struct proc *p2; 977 struct sysreaper *reap; 978 union reaper_info udata; 979 int error; 980 981 if (uap->idtype != P_PID || uap->id != (id_t)p->p_pid) 982 return EINVAL; 983 984 switch(uap->cmd) { 985 case PROC_REAP_ACQUIRE: 986 lwkt_gettoken(&p->p_token); 987 reap = kmalloc(sizeof(*reap), M_REAPER, M_WAITOK|M_ZERO); 988 if (p->p_reaper == NULL || p->p_reaper->p != p) { 989 reaper_init(p, reap); 990 error = 0; 991 } else { 992 kfree(reap, M_REAPER); 993 error = EALREADY; 994 } 995 lwkt_reltoken(&p->p_token); 996 break; 997 case PROC_REAP_RELEASE: 998 lwkt_gettoken(&p->p_token); 999 release_again: 1000 reap = p->p_reaper; 1001 KKASSERT(reap != NULL); 1002 if (reap->p == p) { 1003 reaper_hold(reap); /* in case of thread race */ 1004 lockmgr(&reap->lock, LK_EXCLUSIVE); 1005 if (reap->p != p) { 1006 lockmgr(&reap->lock, LK_RELEASE); 1007 reaper_drop(reap); 1008 goto release_again; 1009 } 1010 reap->p = NULL; 1011 p->p_reaper = reap->parent; 1012 if (p->p_reaper) 1013 reaper_hold(p->p_reaper); 1014 lockmgr(&reap->lock, LK_RELEASE); 1015 reaper_drop(reap); /* our ref */ 1016 reaper_drop(reap); /* old p_reaper ref */ 1017 error = 0; 1018 } else { 1019 error = ENOTCONN; 1020 } 1021 lwkt_reltoken(&p->p_token); 1022 break; 1023 case PROC_REAP_STATUS: 1024 bzero(&udata, sizeof(udata)); 1025 lwkt_gettoken_shared(&p->p_token); 1026 if ((reap = p->p_reaper) != NULL && reap->p == p) { 1027 udata.status.flags = reap->flags; 1028 udata.status.refs = reap->refs - 1; /* minus ours */ 1029 } 1030 p2 = LIST_FIRST(&p->p_children); 1031 udata.status.pid_head = p2 ? p2->p_pid : -1; 1032 lwkt_reltoken(&p->p_token); 1033 1034 if (uap->data) { 1035 error = copyout(&udata, uap->data, 1036 sizeof(udata.status)); 1037 } else { 1038 error = 0; 1039 } 1040 break; 1041 default: 1042 error = EINVAL; 1043 break; 1044 } 1045 return error; 1046 } 1047 1048 /* 1049 * Bump ref on reaper, preventing destruction 1050 */ 1051 void 1052 reaper_hold(struct sysreaper *reap) 1053 { 1054 KKASSERT(reap->refs > 0); 1055 refcount_acquire(&reap->refs); 1056 } 1057 1058 /* 1059 * Drop ref on reaper, destroy the structure on the 1->0 1060 * transition and loop on the parent. 1061 */ 1062 void 1063 reaper_drop(struct sysreaper *next) 1064 { 1065 struct sysreaper *reap; 1066 1067 while ((reap = next) != NULL) { 1068 if (refcount_release(&reap->refs)) { 1069 next = reap->parent; 1070 KKASSERT(reap->p == NULL); 1071 lockmgr(&reaper_lock, LK_EXCLUSIVE); 1072 reap->parent = NULL; 1073 kfree(reap, M_REAPER); 1074 lockmgr(&reaper_lock, LK_RELEASE); 1075 } else { 1076 next = NULL; 1077 } 1078 } 1079 } 1080 1081 /* 1082 * Initialize a static or newly allocated reaper structure 1083 */ 1084 void 1085 reaper_init(struct proc *p, struct sysreaper *reap) 1086 { 1087 reap->parent = p->p_reaper; 1088 reap->p = p; 1089 if (p == initproc) { 1090 reap->flags = REAPER_STAT_OWNED | REAPER_STAT_REALINIT; 1091 reap->refs = 2; 1092 } else { 1093 reap->flags = REAPER_STAT_OWNED; 1094 reap->refs = 1; 1095 } 1096 lockinit(&reap->lock, "subrp", 0, 0); 1097 cpu_sfence(); 1098 p->p_reaper = reap; 1099 } 1100 1101 /* 1102 * Called with p->p_token held during exit. 1103 * 1104 * This is a bit simpler than RELEASE because there are no threads remaining 1105 * to race. We only release if we own the reaper, the exit code will handle 1106 * the final p_reaper release. 1107 */ 1108 struct sysreaper * 1109 reaper_exit(struct proc *p) 1110 { 1111 struct sysreaper *reap; 1112 1113 /* 1114 * Release acquired reaper 1115 */ 1116 if ((reap = p->p_reaper) != NULL && reap->p == p) { 1117 lockmgr(&reap->lock, LK_EXCLUSIVE); 1118 p->p_reaper = reap->parent; 1119 if (p->p_reaper) 1120 reaper_hold(p->p_reaper); 1121 reap->p = NULL; 1122 lockmgr(&reap->lock, LK_RELEASE); 1123 reaper_drop(reap); 1124 } 1125 1126 /* 1127 * Return and clear reaper (caller is holding p_token for us) 1128 * (reap->p does not equal p). Caller must drop it. 1129 */ 1130 if ((reap = p->p_reaper) != NULL) { 1131 p->p_reaper = NULL; 1132 } 1133 return reap; 1134 } 1135 1136 /* 1137 * Return a held (PHOLD) process representing the reaper for process (p). 1138 * NULL should not normally be returned. Caller should PRELE() the returned 1139 * reaper process when finished. 1140 * 1141 * Remove dead internal nodes while we are at it. 1142 * 1143 * Process (p)'s token must be held on call. 1144 * The returned process's token is NOT acquired by this routine. 1145 */ 1146 struct proc * 1147 reaper_get(struct sysreaper *reap) 1148 { 1149 struct sysreaper *next; 1150 struct proc *reproc; 1151 1152 if (reap == NULL) 1153 return NULL; 1154 1155 /* 1156 * Extra hold for loop 1157 */ 1158 reaper_hold(reap); 1159 1160 while (reap) { 1161 lockmgr(&reap->lock, LK_SHARED); 1162 if (reap->p) { 1163 /* 1164 * Probable reaper 1165 */ 1166 if (reap->p) { 1167 reproc = reap->p; 1168 PHOLD(reproc); 1169 lockmgr(&reap->lock, LK_RELEASE); 1170 reaper_drop(reap); 1171 return reproc; 1172 } 1173 1174 /* 1175 * Raced, try again 1176 */ 1177 lockmgr(&reap->lock, LK_RELEASE); 1178 continue; 1179 } 1180 1181 /* 1182 * Traverse upwards in the reaper topology, destroy 1183 * dead internal nodes when possible. 1184 * 1185 * NOTE: Our ref on next means that a dead node should 1186 * have 2 (ours and reap->parent's). 1187 */ 1188 next = reap->parent; 1189 while (next) { 1190 reaper_hold(next); 1191 if (next->refs == 2 && next->p == NULL) { 1192 lockmgr(&reap->lock, LK_RELEASE); 1193 lockmgr(&reap->lock, LK_EXCLUSIVE); 1194 if (next->refs == 2 && 1195 reap->parent == next && 1196 next->p == NULL) { 1197 /* 1198 * reap->parent inherits ref from next. 1199 */ 1200 reap->parent = next->parent; 1201 next->parent = NULL; 1202 reaper_drop(next); /* ours */ 1203 reaper_drop(next); /* old parent */ 1204 next = reap->parent; 1205 continue; /* possible chain */ 1206 } 1207 } 1208 break; 1209 } 1210 lockmgr(&reap->lock, LK_RELEASE); 1211 reaper_drop(reap); 1212 reap = next; 1213 } 1214 return NULL; 1215 } 1216 1217 /* 1218 * Test that the sender is allowed to send a signal to the target. 1219 * The sender process is assumed to have a stable reaper. The 1220 * target can be e.g. from a scan callback. 1221 * 1222 * Target cannot be the reaper process itself unless reaper_ok is specified, 1223 * or sender == target. 1224 */ 1225 int 1226 reaper_sigtest(struct proc *sender, struct proc *target, int reaper_ok) 1227 { 1228 struct sysreaper *sreap; 1229 struct sysreaper *reap; 1230 int r; 1231 1232 sreap = sender->p_reaper; 1233 if (sreap == NULL) 1234 return 1; 1235 1236 if (sreap == target->p_reaper) { 1237 if (sreap->p == target && sreap->p != sender && reaper_ok == 0) 1238 return 0; 1239 return 1; 1240 } 1241 lockmgr(&reaper_lock, LK_SHARED); 1242 r = 0; 1243 for (reap = target->p_reaper; reap; reap = reap->parent) { 1244 if (sreap == reap) { 1245 if (sreap->p != target || reaper_ok) 1246 r = 1; 1247 break; 1248 } 1249 } 1250 lockmgr(&reaper_lock, LK_RELEASE); 1251 1252 return r; 1253 } 1254