/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/lwp.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
static MALLOC_DEFINE(M_REAPER, "reaper", "process reapers");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.  Callbacks are registered with at_fork() and
 * removed with rm_at_fork(); see the example following at_fork() below.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags,
			    const cpumask_t *mask);
static int lwp_create1(struct lwp_params *params,
			    const cpumask_t *mask);

int forksleep;			/* place for fork1() to sleep on */

/*
 * Red-Black tree support for LWPs
 */

static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
	if (lp1->lwp_tid < lp2->lwp_tid)
		return(-1);
	if (lp1->lwp_tid > lp2->lwp_tid)
		return(1);
	return(0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);
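
/*
 * (Note: in addition to the insert/remove functions, RB_GENERATE2 also
 * emits a direct lookup function keyed on the lwp_tid field,
 * lwp_rb_tree_RB_LOOKUP(), which other kernel code uses to translate a
 * tid back into its lwp.)
 */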
/*
 * fork() system call
 */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}

/*
 * vfork() system call
 */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}

/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new one, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2) {
			PHOLD(p2);
			start_forked_proc(lp, p2);
			uap->sysmsg_fds[0] = p2->p_pid;
			uap->sysmsg_fds[1] = 0;
			PRELE(p2);
		} else {
			uap->sysmsg_fds[0] = 0;
			uap->sysmsg_fds[1] = 0;
		}
	}
	return error;
}
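
/*
 * (For example, given the flag handling above and in fork1():
 *
 *	rfork(RFFDG | RFPROC)	is equivalent to a plain fork()
 *	rfork(RFPROC | RFMEM)	creates a child that shares the parent's
 *				address space
 *	rfork(RFFDG)		creates no new process; it unshares the
 *				calling process's descriptor table in place
 *
 * RFPGLOCK is OR'd in internally for the process group signal interlock.)
 */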
static int
lwp_create1(struct lwp_params *uprm, const cpumask_t *umask)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	cpumask_t *mask = NULL, mask0;
	int error;

	error = copyin(uprm, &params, sizeof(params));
	if (error)
		goto fail2;

	if (umask != NULL) {
		error = copyin(umask, &mask0, sizeof(mask0));
		if (error)
			goto fail2;
		CPUMASK_ANDMASK(mask0, smp_active_mask);
		if (CPUMASK_TESTNZERO(mask0))
			mask = &mask0;
	}

	lwkt_gettoken(&p->p_token);
	plimit_lwp_fork(p);	/* force exclusive access */
	lp = lwp_fork(curthread->td_lwp, p, RFPROC | RFMEM, mask);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.lwp_tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid1,
			     sizeof(lp->lwp_tid))))
		goto fail;
	if (params.lwp_tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid2,
			     sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();
	lwkt_reltoken(&p->p_token);

	return (0);

fail:
	/*
	 * Make sure no one is using this lwp before it is removed from
	 * the tree.  If we didn't wait here, lwp tree iteration with
	 * blocking operation would be broken.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpfail", 1);
	lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
	--p->p_nthreads;
	/* lwp_dispose expects an exited lwp, and a held proc */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	lp->lwp_thread->td_flags |= TDF_EXITING;
	lwkt_remove_tdallq(lp->lwp_thread);
	PHOLD(p);
	biosched_done(lp->lwp_thread);
	dsched_exit_thread(lp->lwp_thread);
	lwp_dispose(lp);
	lwkt_reltoken(&p->p_token);
fail2:
	return (error);
}

/*
 * Low level thread create used by pthreads.
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{
	return (lwp_create1(uap->params, NULL));
}

int
sys_lwp_create2(struct lwp_create2_args *uap)
{
	return (lwp_create1(uap->params, uap->mask));
}
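
/*
 * (Note: the new lwp's tid is copied out through lwp_tid1 and lwp_tid2
 * (when non-NULL) before the lwp is scheduled, so userland -- presumably
 * one slot for the creating thread and one in the new thread's own
 * control block -- can observe the thread id regardless of which thread
 * runs first.  lwp_create2() additionally lets the caller restrict the
 * new lwp to a cpu mask, which is trimmed against smp_active_mask above.)
 */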
int nprocs = 1;		/* process 0 */

int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2;
	struct proc *pptr;
	struct pgrp *p1grp;
	struct pgrp *plkgrp;
	struct sysreaper *reap;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	lwkt_gettoken(&p1->p_token);
	plkgrp = NULL;
	p2 = NULL;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running.
		 */
		if (p1->p_nthreads != 1) {
			error = EINVAL;
			goto done;
		}

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1, fdtmp);
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				error = fdcopy(p1, &newfd);
				if (error != 0) {
					error = ENOMEM;
					goto done;
				}
				fdfree(p1, newfd);
			}
		}
		*procp = NULL;
		error = 0;
		goto done;
	}

	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't, the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	p1grp = p1->p_pgrp;
	if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
		pgref(plkgrp);
		lockmgr(&plkgrp->pg_lock, LK_SHARED);
		if (CURSIG_NOBLOCK(lp1)) {
			error = ERESTART;
			goto done;
		}
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = lp1->lwp_thread->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
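
	/*
	 * (For example, with maxproc == 1000 an unprivileged uid begins
	 * failing with EAGAIN once nprocs reaches 990, while root can
	 * continue up to the full 1000.)
	 */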
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	atomic_add_int(&nprocs, 1);

	/*
	 * Increment the count of procs running with this uid.  This also
	 * applies to root.
	 */
	ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
			plimit_getadjvalue(RLIMIT_NPROC));
	if (!ok) {
		/*
		 * Back out the process count.
		 */
		atomic_add_int(&nprocs, -1);
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("maxproc limit of %jd "
				"exceeded by \"%s\" uid %d, "
				"please see tuning(7) and login.conf(5).\n",
				plimit_getadjvalue(RLIMIT_NPROC),
				p1->p_comm,
				uid);
		}
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/*
	 * Allocate a new process, don't get fancy: zero the structure.
	 */
	p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

	/*
	 * Core initialization.  SIDL is a safety state that protects the
	 * partially initialized process once it starts getting hooked
	 * into system structures and becomes addressable.
	 *
	 * We must be sure to acquire p2->p_token as well, we must hold it
	 * once the process is on the allproc list to avoid things such
	 * as competing modifications to p_flags.
	 */
	mycpu->gd_forkid += ncpus;
	p2->p_forkid = mycpu->gd_forkid + mycpu->gd_cpuid;
	p2->p_lasttid = 0;	/* first tid will be 1 */
	p2->p_stat = SIDL;

	/*
	 * NOTE: Process 0 will not have a reaper, but process 1 (init) and
	 *	 all other processes always will.
	 */
	if ((reap = p1->p_reaper) != NULL) {
		reaper_hold(reap);
		p2->p_reaper = reap;
	} else {
		p2->p_reaper = NULL;
	}

	RB_INIT(&p2->p_lwp_tree);
	spin_init(&p2->p_spin, "procfork1");
	lwkt_token_init(&p2->p_token, "proc");
	lwkt_gettoken(&p2->p_token);

	/*
	 * Setup linkage for kernel based threading XXX lwp.  Also add the
	 * process to the allproc list.
	 *
	 * The process structure is addressable after this point.
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_leader = p2;
	}
	proc_add_allproc(p2);

	/*
	 * Initialize the section which is copied verbatim from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	      ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Duplicate sub-structures as needed.  Increase reference counts
	 * on shared objects.
	 *
	 * NOTE: because we are now on the allproc list it is possible for
	 *	 other consumers to gain temporary references to p2
	 *	 (p2->p_lock can change).
	 */
	if (p1->p_flags & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flags |= P_JAILED;

	if (p2->p_args)
		refcount_acquire(&p2->p_args->ar_ref);

	p2->p_usched = p1->p_usched;
	/* XXX: verify copy of the secondary iosched stuff */
	dsched_enter_proc(p2);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		refcount_acquire(&p2->p_sigacts->ps_refcnt);
	} else {
		p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts),
					M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		refcount_init(&p2->p_sigacts->ps_refcnt, 1);
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/* copy namecache handle to the text file */
	if (p1->p_textnch.mount)
		cache_copy(&p1->p_textnch, &p2->p_textnch);

	/*
	 * Handle file descriptors.
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		error = fdcopy(p1, &p2->p_fd);
		if (error != 0) {
			error = ENOMEM;
			goto done;
		}
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL) {
			p1->p_fdtol = filedesc_to_leader_alloc(NULL,
							       p1->p_leader);
		}
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1);
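
	/*
	 * (Summary of the descriptor cases above, for reference: RFCFDG
	 * gives the child a freshly initialized table, RFFDG gives it a
	 * private copy, and with neither flag the table is shared
	 * outright, with fdtol tracking the descriptor-to-leader
	 * association used when cleaning up POSIX advisory locks.)
	 */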
	/*
	 * Adjust depth for resource downscaling.
	 */
	if ((p2->p_depth & 31) != 31)
		++p2->p_depth;

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flags |= p1->p_flags & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT))
		p2->p_flags |= P_CONTROLT;
	if (flags & RFPPWAIT) {
		p2->p_flags |= P_PPWAIT;
		if (p1->p_upmap)
			atomic_add_int(&p1->p_upmap->invfork, 1);
	}

	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	if (p1->p_vkernel)
		vkernel_inherit(p1, p2);

	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	pgref(p1grp);
	lwkt_gettoken(&p1grp->pg_token);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	lwkt_reltoken(&p1grp->pg_token);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of the reaper (typically init).  This effectively disassociates
	 * the child from the parent.
	 *
	 * Temporarily hold pptr for the RFNOWAIT case to avoid ripouts.
	 */
	if (flags & RFNOWAIT) {
		pptr = reaper_get(reap);
		if (pptr == NULL) {
			pptr = initproc;
			PHOLD(pptr);
		}
	} else {
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INIT(&p2->p_children);

	lwkt_gettoken(&pptr->p_token);
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	lwkt_reltoken(&pptr->p_token);

	if (flags & RFNOWAIT)
		PRELE(pptr);

	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init_mp(&p2->p_ithandle);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above, but we still could have a trace race
	 * so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);

	/*
	 * Create the first lwp associated with the new proc.  It will
	 * return via a different execution path later, directly into
	 * userland, after it has been put on the runq by
	 * start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags, NULL);
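
	/*
	 * (The comparisons below match the exact flag sets passed by
	 * sys_fork() and sys_vfork() above, so genuine forks and vforks
	 * are counted separately from kernel thread creation and other
	 * rfork variants in the vm statistics.)
	 */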
	if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
					     p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
						p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 * What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	error = 0;
done:
	if (p2)
		lwkt_reltoken(&p2->p_token);
	lwkt_reltoken(&p1->p_token);
	if (plkgrp) {
		lockmgr(&plkgrp->pg_lock, LK_RELEASE);
		pgrel(plkgrp);
	}
	return (error);
}

static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags,
	 const cpumask_t *mask)
{
	globaldata_t gd = mycpu;
	struct lwp *lp;
	struct thread *td;

	lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

	lp->lwp_proc = destproc;
	lp->lwp_vmspace = destproc->p_vmspace;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	      (unsigned) ((caddr_t)&lp->lwp_endcopy -
			  (caddr_t)&lp->lwp_startcopy));
	if (mask != NULL)
		lp->lwp_cpumask = *mask;

	/*
	 * Reset the sigaltstack if memory is shared, otherwise inherit
	 * it.
	 */
	if (flags & RFMEM) {
		lp->lwp_sigstk.ss_flags = SS_DISABLE;
		lp->lwp_sigstk.ss_size = 0;
		lp->lwp_sigstk.ss_sp = NULL;
		lp->lwp_flags &= ~LWP_ALTSTACK;
	} else {
		lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;
	}

	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();
	CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask);
	lwkt_token_init(&lp->lwp_token, "lwp_token");
	spin_init(&lp->lwp_spin, "lwptoken");

	/*
	 * Assign the thread to the current cpu to begin with so we
	 * can manipulate it.
	 */
	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
	lp->lwp_thread = td;
	td->td_ucred = crhold(destproc->p_ucred);
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
#ifdef NO_LWKT_SPLIT_USERPRI
	lwkt_setpri(td, TDPRI_USER_NORM);
#else
	lwkt_setpri(td, TDPRI_KERN_USER);
#endif
	lwkt_set_comm(td, "%s", destproc->p_comm);

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	kqueue_init(&lp->lwp_kqueue, destproc->p_fd);

	/*
	 * Assign a TID to the lp.  Loop until the insert succeeds (returns
	 * NULL).
	 *
	 * If we are in a vfork assign the same TID as the lwp that did the
	 * vfork().  This way if the user program messes around with
	 * pthread calls inside the vfork(), it will operate like an
	 * extension of the (blocked) parent.  Also note that since the
	 * address space is being shared, insofar as pthreads is concerned,
	 * the code running in the vfork() is part of the original process.
	 */
	if (flags & RFPPWAIT) {
		lp->lwp_tid = origlp->lwp_tid - 1;
	} else {
		lp->lwp_tid = destproc->p_lasttid;
	}

	do {
		if (++lp->lwp_tid <= 0)
			lp->lwp_tid = 1;
	} while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);

	destproc->p_lasttid = lp->lwp_tid;
	destproc->p_nthreads++;

	/*
	 * This flag is set and never cleared.  It means that the process
	 * was threaded at some point.  Used to improve exit performance.
	 */
	destproc->p_flags |= P_MAYBETHREADED;

	return (lp);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list,
 * however first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
			function);
	}
#endif
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}
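
/*
 * (Example, a sketch: a module that wants to run code at every fork
 * registers a callback matching forklist_fn; fork1() invokes it with
 * the parent, the new child, and the fork flags:
 *
 *	static void
 *	my_fork_callout(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		... inspect or adjust the new process ...
 *	}
 *
 *	at_fork(my_fork_callout);	(e.g. on module load)
 *	rm_at_fork(my_fork_callout);	(e.g. on module unload)
 *
 * my_fork_callout is a hypothetical name used only for illustration.)
 */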
/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return(1);
		}
	}
	return (0);
}

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 *
 * p2 is held by the caller.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);
	int pflags;

	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
		("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);

	/*
	 * Preserve synchronization semantics of vfork.  P_PPWAIT is set in
	 * the child until it has retired the parent's resources.  The parent
	 * must wait for the flag to be cleared by the child.
	 *
	 * Interlock the flag/tsleep with atomic ops to avoid unnecessary
	 * p_token conflicts.
	 *
	 * XXX Is this use of an atomic op on a field that is not normally
	 * manipulated with atomic ops ok?
	 */
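	/*
	 * (The pattern below first registers on the sleep queue via
	 * tsleep_interlock(), then uses atomic_cmpset_int() purely as a
	 * re-check that p_flags is still the value we sampled -- the
	 * cmpset writes back the same value -- before committing to the
	 * PINTERLOCKED tsleep.  A wakeup between the flag test and the
	 * sleep therefore cannot be lost.)
	 */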
	while ((pflags = p2->p_flags) & P_PPWAIT) {
		cpu_ccfence();
		tsleep_interlock(lp1->lwp_proc, 0);
		if (atomic_cmpset_int(&p2->p_flags, pflags, pflags))
			tsleep(lp1->lwp_proc, PINTERLOCKED, "ppwait", 0);
	}
}

/*
 * procctl (idtype_t idtype, id_t id, int cmd, void *arg)
 */
int
sys_procctl(struct procctl_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	struct sysreaper *reap;
	union reaper_info udata;
	int error;

	if (uap->idtype != P_PID || uap->id != (id_t)p->p_pid)
		return EINVAL;

	switch(uap->cmd) {
	case PROC_REAP_ACQUIRE:
		lwkt_gettoken(&p->p_token);
		reap = kmalloc(sizeof(*reap), M_REAPER, M_WAITOK|M_ZERO);
		if (p->p_reaper == NULL || p->p_reaper->p != p) {
			reaper_init(p, reap);
			error = 0;
		} else {
			kfree(reap, M_REAPER);
			error = EALREADY;
		}
		lwkt_reltoken(&p->p_token);
		break;
	case PROC_REAP_RELEASE:
		lwkt_gettoken(&p->p_token);
release_again:
		reap = p->p_reaper;
		KKASSERT(reap != NULL);
		if (reap->p == p) {
			reaper_hold(reap);	/* in case of thread race */
			lockmgr(&reap->lock, LK_EXCLUSIVE);
			if (reap->p != p) {
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);
				goto release_again;
			}
			reap->p = NULL;
			p->p_reaper = reap->parent;
			if (p->p_reaper)
				reaper_hold(p->p_reaper);
			lockmgr(&reap->lock, LK_RELEASE);
			reaper_drop(reap);	/* our ref */
			reaper_drop(reap);	/* old p_reaper ref */
			error = 0;
		} else {
			error = ENOTCONN;
		}
		lwkt_reltoken(&p->p_token);
		break;
	case PROC_REAP_STATUS:
		bzero(&udata, sizeof(udata));
		lwkt_gettoken_shared(&p->p_token);
		if ((reap = p->p_reaper) != NULL && reap->p == p) {
			udata.status.flags = reap->flags;
			udata.status.refs = reap->refs - 1; /* minus ours */
		}
		p2 = LIST_FIRST(&p->p_children);
		udata.status.pid_head = p2 ? p2->p_pid : -1;
		lwkt_reltoken(&p->p_token);

		if (uap->data) {
			error = copyout(&udata, uap->data,
					sizeof(udata.status));
		} else {
			error = 0;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}
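
/*
 * (Usage sketch: a process can make itself the reaper for its own
 * subtree with
 *
 *	procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL);
 *
 * and later interrogate it with PROC_REAP_STATUS, passing a
 * union reaper_info for uap->data.  data may be NULL for STATUS, in
 * which case only the error code is returned.  As the checks above
 * show, a process may only operate on itself: idtype must be P_PID
 * and id must be the caller's own pid.)
 */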
/*
 * Bump ref on reaper, preventing destruction.
 */
void
reaper_hold(struct sysreaper *reap)
{
	KKASSERT(reap->refs > 0);
	refcount_acquire(&reap->refs);
}

/*
 * Drop ref on reaper, destroy the structure on the 1->0
 * transition and loop on the parent.
 */
void
reaper_drop(struct sysreaper *next)
{
	struct sysreaper *reap;

	while ((reap = next) != NULL) {
		if (refcount_release(&reap->refs)) {
			next = reap->parent;
			KKASSERT(reap->p == NULL);
			reap->parent = NULL;
			kfree(reap, M_REAPER);
		} else {
			next = NULL;
		}
	}
}

/*
 * Initialize a static or newly allocated reaper structure.
 */
void
reaper_init(struct proc *p, struct sysreaper *reap)
{
	reap->parent = p->p_reaper;
	reap->p = p;
	if (p == initproc) {
		reap->flags = REAPER_STAT_OWNED | REAPER_STAT_REALINIT;
		reap->refs = 2;
	} else {
		reap->flags = REAPER_STAT_OWNED;
		reap->refs = 1;
	}
	lockinit(&reap->lock, "subrp", 0, 0);
	cpu_sfence();
	p->p_reaper = reap;
}

/*
 * Called with p->p_token held during exit.
 *
 * This is a bit simpler than RELEASE because there are no threads remaining
 * to race.  We only release if we own the reaper; the exit code will handle
 * the final p_reaper release.
 */
struct sysreaper *
reaper_exit(struct proc *p)
{
	struct sysreaper *reap;

	/*
	 * Release acquired reaper.
	 */
	if ((reap = p->p_reaper) != NULL && reap->p == p) {
		lockmgr(&reap->lock, LK_EXCLUSIVE);
		p->p_reaper = reap->parent;
		if (p->p_reaper)
			reaper_hold(p->p_reaper);
		reap->p = NULL;
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
	}

	/*
	 * Return and clear the remaining reaper (caller is holding p_token
	 * for us; reap->p does not equal p).  Caller must drop it.
	 */
	if ((reap = p->p_reaper) != NULL)
		p->p_reaper = NULL;
	return reap;
}

/*
 * Return a held (PHOLD) process representing the reaper for process (p).
 * NULL should not normally be returned.  Caller should PRELE() the returned
 * reaper process when finished.
 *
 * Remove dead internal nodes while we are at it.
 *
 * Process (p)'s token must be held on call.
 * The returned process's token is NOT acquired by this routine.
 */
struct proc *
reaper_get(struct sysreaper *reap)
{
	struct sysreaper *next;
	struct proc *reproc;

	if (reap == NULL)
		return NULL;

	/*
	 * Extra hold for loop.
	 */
	reaper_hold(reap);

	while (reap) {
		lockmgr(&reap->lock, LK_SHARED);
		if (reap->p) {
			/*
			 * Probable reaper.
			 */
			if (reap->p) {
				reproc = reap->p;
				PHOLD(reproc);
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);
				return reproc;
			}

			/*
			 * Raced, try again.
			 */
			lockmgr(&reap->lock, LK_RELEASE);
			continue;
		}

		/*
		 * Traverse upwards in the reaper topology, destroy
		 * dead internal nodes when possible.
		 *
		 * NOTE: Our ref on next means that a dead node should
		 *	 have 2 (ours and reap->parent's).
		 */
		next = reap->parent;
		while (next) {
			reaper_hold(next);
			if (next->refs == 2 && next->p == NULL) {
				lockmgr(&reap->lock, LK_RELEASE);
				lockmgr(&reap->lock, LK_EXCLUSIVE);
				if (next->refs == 2 &&
				    reap->parent == next &&
				    next->p == NULL) {
					/*
					 * reap->parent inherits ref from next.
					 */
					reap->parent = next->parent;
					next->parent = NULL;
					reaper_drop(next);	/* ours */
					reaper_drop(next);	/* old parent */
					next = reap->parent;
					continue;	/* possible chain */
				}
			}
			break;
		}
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
		reap = next;
	}
	return NULL;
}