/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 * $DragonFly: src/sys/kern/kern_exit.c,v 1.91 2008/05/18 20:02:02 nth Exp $
 */

#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/aio.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/upcall.h>
#include <sys/caps.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
struct task *deadlwp_task[MAXCPU];
struct lwplist deadlwp_list[MAXCPU];

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 */
int
sys_extexit(struct extexit_args *uap)
{
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;

	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;

	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;

	default:
		return (EINVAL);
	}

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * SZOMB!
		 */
		if (curproc->p_nthreads > 1) {
			lwp_exit(0);
			/* NOT REACHED */
		}
		/* else last lwp in proc:  do the real thing */
		/* FALLTHROUGH */

	default:	/* to help gcc */
	case EXTEXIT_PROC:
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
}
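
/*
 * Illustrative userland usage of extexit(2) (a sketch, not part of the
 * original source): a multi-threaded program can kill just the calling
 * lwp while publishing a status word its peers can read, by combining
 * an EXTEXIT_SETINT action with an EXTEXIT_LWP who and passing a
 * pointer to an int in its address space.  Only the last lwp to exit
 * takes the exit1() path above and turns the whole process into a
 * zombie.
 */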

/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flag & P_WEXIT)
		return (EALREADY);
	p->p_flag |= P_WEXIT;

	/*
	 * Interlock with LWP_WEXIT and kill any remaining LWPs
	 */
	lp->lwp_flag |= LWP_WEXIT;
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		lp->lwp_flag &= ~LWP_WEXIT;
		p->p_flag &= ~P_WEXIT;
	}
	return(0);
}

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		if ((tlp->lwp_flag & LWP_WEXIT) == 0) {
			lwpsignal(p, tlp, SIGKILL);
			tlp->lwp_flag |= LWP_WEXIT;
		}
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.
	 */
	while (p->p_nthreads > 1) {
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
	}
}
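
/*
 * Note (added commentary): exit1() below calls killalllwps(0) and treats
 * EALREADY as "another thread won the master-exit race", finishing the
 * loser off with lwp_exit(0).  The exec path is the expected caller of
 * killalllwps(1), which keeps the surviving thread usable after the
 * other lwps are gone.
 */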

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q, *nq;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);

	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0);
		/* NOT REACHED */
	}

	caps_exit(lp->lwp_thread);
	aio_proc_rundown(p);

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;

		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while (q) {
			killArgs.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			sys_kill(&killArgs);
			nq = q;
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flag & P_PROFIL)
		stopprofclock(p);

	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p, NULL);

	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	KKASSERT(p->p_numposixlocks == 0);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Release upcalls associated with this process
	 */
	if (vm->vm_upcalls)
		upc_release(vm, lp);

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	if (p->p_vkernel) {
		vkernel_lwp_exit(lp);
		vkernel_exit(p);
	}

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	++vm->vm_exitingcnt;
	sysref_put(&vm->vm_sysref);
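
	/*
	 * (Added illustration of the comment above)  With two processes A
	 * and B sharing a vmspace, A and B may reach this point in one
	 * order but be torn down by vmspace_exit() in another: the second
	 * to arrive here strips as much of the user address space as it
	 * can, and the second to be cleaned up by vmspace_exit() frees
	 * what remains.
	 */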

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
			 *
			 * NOTE: while waiting for the process group to exit
			 * it is possible that one of the processes in the
			 * group will revoke the tty, so the ttyclosesession()
			 * function will re-check sp->s_ttyvp.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				ttywait(sp->s_ttyp);
				ttyclosesession(sp, 1); /* also revoke */
			}
			/*
			 * Release the tty.  If someone has it open via
			 * /dev/tty then close it (since they no longer can
			 * once we've NULL'd it out).
			 */
			ttyclosesession(sp, 0);

			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	if (p->p_tracenode)
		ktrdestroy(&p->p_tracenode);
	p->p_traceflag = 0;
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 */
	proc_move_allproc_zombie(p);

	q = LIST_FIRST(&p->p_children);
	if (q)		/* only need this if any child is S_ZOMB */
		wakeup((caddr_t) initproc);
	for (; q != 0; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		LIST_REMOVE(q, p_sibling);
		LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
		q->p_pptr = initproc;
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			ksignal(q, SIGKILL);
		}
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	p->p_xstat = rv;
	calcru_proc(p, &p->p_ru);
	ruadd(&p->p_ru, &p->p_cru);

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, notify process 1 instead (and hope it will handle
	 * this situation).
	 */
	if (p->p_pptr->p_sigacts->ps_flag & PS_NOCLDWAIT) {
		struct proc *pp = p->p_pptr;

		proc_reparent(p, initproc);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup((caddr_t)pp);
	}

	if (p->p_sigparent && p->p_pptr != initproc) {
		ksignal(p->p_pptr, p->p_sigparent);
	} else {
		ksignal(p->p_pptr, SIGCHLD);
	}

	wakeup((caddr_t)p->p_pptr);

	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	plimit_free(p);

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can work in someone else.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 */
	lwp_exit(1);
}
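
/*
 * Note (added commentary): masterexit reflects who is calling.  exit1()
 * passes 1 for the lwp performing the master exit, which stays on
 * p_lwps and is reaped synchronously by wait*(); the other paths in
 * this file (sys_extexit(), the killalllwps() race loser) pass 0 and
 * hand the lwp to the per-cpu deadlwp reaper defined at the bottom of
 * this file.
 */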

void
lwp_exit(int masterexit)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * lwp_exit() may be called without setting LWP_WEXIT, so
	 * make sure it is set here.
	 */
	lp->lwp_flag |= LWP_WEXIT;

	/*
	 * Clean up any virtualization
	 */
	if (lp->lwp_vkernel)
		vkernel_lwp_exit(lp);

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);

	/*
	 * If we don't hold the process until the LWP is reaped wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */
	PHOLD(p);

	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 */
	if (masterexit == 0) {
		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		--p->p_nthreads;
		wakeup(&p->p_nthreads);
		LIST_INSERT_HEAD(&deadlwp_list[mycpuid], lp, u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[mycpuid], deadlwp_task[mycpuid]);
	} else {
		--p->p_nthreads;
	}
	biosched_done(curthread);
	cpu_lwp_exit();
}

/*
 * Wait until a lwp is completely dead.
 *
 * If the thread is still executing, which can't be waited upon,
 * return failure.  The caller is responsible for waiting a little
 * bit and checking again.
 *
 * Suggested use:
 * while (!lwp_wait(lp))
 *	tsleep(lp, 0, "lwpwait", 1);
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);

	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpwait1", 1);

	lwkt_wait_free(td);

	/*
	 * The lwp's thread may still be in the middle
	 * of switching away, we can't rip its stack out from
	 * under it until TDF_EXITING is set and both
	 * TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets
	 * preempted.
	 *
	 * YYY no wakeup occurs, so we simply return failure
	 * and let the caller deal with sleeping and calling
	 * us again.
	 */
	if ((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) !=
	    TDF_EXITING)
		return (0);

	return (1);
}

/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) ==
		 TDF_EXITING);

	PRELE(lp->lwp_proc);
	lp->lwp_proc = NULL;
	if (td != NULL) {
		td->td_proc = NULL;
		td->td_lwp = NULL;
		lp->lwp_thread = NULL;
		lwkt_free_thread(td);
	}
	kfree(lp, M_LWP);
}
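
/*
 * Illustrative userland entry point (a sketch, not part of the original
 * source):
 *
 *	int status;
 *	struct rusage ru;
 *	pid_t pid = wait4(-1, &status, WNOHANG, &ru);
 *
 * A pid of 0 here is the WNOHANG "nothing ready" result produced by the
 * *res = 0 case in kern_wait() below.
 */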

int
sys_wait4(struct wait_args *uap)
{
	struct rusage rusage;
	int error, status;

	error = kern_wait(uap->pid, uap->status ? &status : NULL,
	    uap->options, uap->rusage ? &rusage : NULL, &uap->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage)
		error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
	return (error);
}

/*
 * wait1()
 *
 * wait_args(int pid, int *status, int options, struct rusage *rusage)
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
	struct thread *td = curthread;
	struct lwp *lp;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	int nfound, error;

	if (pid == 0)
		pid = -q->p_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);
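
	/*
	 * (Added note)  Status word layout, as defined in <sys/wait.h>:
	 * exit1() stored W_EXITCODE(rval, signum) == (rval << 8) | signum
	 * in p_xstat, so WEXITSTATUS()/WTERMSIG() recover the pieces;
	 * W_STOPCODE(sig) == (sig << 8) | 0177 is handed back for stopped
	 * children below.
	 */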

loop:
	/*
	 * Hack for backwards compatibility with badly written user code.
	 * Or perhaps we have to do this anyway, it is unclear. XXX
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This
	 * little two-line hack restores this effect.
	 */
	while (q->p_stat == SSTOP)
		tstop();

	nfound = 0;
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid)
			continue;

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 */
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);

			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			while (p->p_lock)
				tsleep(p, 0, "reap3", hz);

			/* scheduling hook for heuristic */
			/* XXX no lwp available, we need a different heuristic */
			/*
			p->p_usched->heuristic_exiting(td->td_lwp, deadlp);
			*/

			/* Take care of our return values. */
			*res = p->p_pid;
			if (status)
				*status = p->p_xstat;
			if (rusage)
				*rusage = p->p_ru;

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				return (0);
			}

			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			KKASSERT(p->p_lock == 0);
			proc_remove_zombie(p);
			leavepgrp(p);

			p->p_xstat = 0;
			ruadd(&q->p_cru, &p->p_ru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			if (p->p_args && --p->p_args->ar_ref == 0)
				FREE(p->p_args, M_PARGS);

			if (--p->p_sigacts->ps_refcnt == 0) {
				kfree(p->p_sigacts, M_SUBPROC);
				p->p_sigacts = NULL;
			}

			vm_waitproc(p);
			kfree(p, M_PROC);
			nprocs--;
			return (0);
		}
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || options & WUNTRACED)) {
			p->p_flag |= P_WAITED;

			*res = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			if (rusage)
				bzero(rusage, sizeof(*rusage));
			return (0);
		}
		if (options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
			*res = p->p_pid;
			p->p_flag &= ~P_CONTINUED;

			if (status)
				*status = SIGCONT;
			return (0);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		*res = 0;
		return (0);
	}
	error = tsleep((caddr_t)q, PCATCH, "wait", 0);
	if (error)
		return (error);
	goto loop;
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}

/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 *	Take the arguments given and put them onto the exit callout list.
 *	However, first make sure that it's not already there.
 *	Returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",
		    function);
#endif
	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
			return(1);
		}
	}
	return (0);
}
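
/*
 * Illustrative registration from a hypothetical loadable module (a
 * sketch, not part of the original source; exitlist_fn callbacks are
 * invoked with the exiting thread, as in the TAILQ_FOREACH in exit1()):
 *
 *	static void
 *	my_exit_hook(struct thread *td)
 *	{
 *		... per-process teardown ...
 *	}
 *
 *	at_exit(my_exit_hook);		returns 0, or ENOMEM on failure
 *	...
 *	rm_at_exit(my_exit_hook);	returns 1 if an entry was removed
 */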

/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
	struct lwplist *lwplist = context;
	struct lwp *lp;

	get_mplock();
	while ((lp = LIST_FIRST(lwplist))) {
		LIST_REMOVE(lp, u.lwp_reap_entry);
		reaplwp(lp);
	}
	rel_mplock();
}

static void
reaplwp(struct lwp *lp)
{
	while (lwp_wait(lp) == 0)
		tsleep(lp, 0, "lwpreap", 1);
	lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
					    M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
	}
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);
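
/*
 * Note (added commentary): reaplwp() is exactly the sleep-and-retry loop
 * suggested in the lwp_wait() comment.  Each cpu gets its own
 * deadlwp_list and task, and lwp_exit() enqueues onto the current cpu's
 * list, so dead lwps are normally disposed of by the taskqueue thread
 * of the cpu they exited on.
 */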