/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 * $DragonFly: src/sys/kern/kern_exit.c,v 1.90 2008/05/08 01:26:00 dillon Exp $
 */

#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/aio.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/upcall.h>
#include <sys/caps.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
struct task *deadlwp_task[MAXCPU];
struct lwplist deadlwp_list[MAXCPU];

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 */
int
sys_extexit(struct extexit_args *uap)
{
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;

	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;

	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;

	default:
		return (EINVAL);
	}

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * SZOMB!
		 */
		if (curproc->p_nthreads > 1) {
			lwp_exit(0);
			/* NOT REACHED */
		}
		/* else last lwp in proc:  do the real thing */
		/* FALLTHROUGH */

	default:	/* to help gcc */
	case EXTEXIT_PROC:
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
}

/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flag & P_WEXIT)
		return (EALREADY);
	p->p_flag |= P_WEXIT;

	/*
	 * Interlock with LWP_WEXIT and kill any remaining LWPs
	 */
	lp->lwp_flag |= LWP_WEXIT;
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		lp->lwp_flag &= ~LWP_WEXIT;
		p->p_flag &= ~P_WEXIT;
	}
	return(0);
}

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		if ((tlp->lwp_flag & LWP_WEXIT) == 0) {
			lwpsignal(p, tlp, SIGKILL);
			tlp->lwp_flag |= LWP_WEXIT;
		}
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.
	 */
	while (p->p_nthreads > 1) {
		if (bootverbose)
			kprintf("killlwps: waiting for %d lwps of pid "
				"%d to die\n",
				p->p_nthreads - 1, p->p_pid);
		tsleep(&p->p_nthreads, 0, "killlwps", hz);
	}
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q, *nq;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0);
		/* NOT REACHED */
	}

	caps_exit(lp->lwp_thread);
	aio_proc_rundown(p);

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;
		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while(q) {
			killArgs.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			sys_kill(&killArgs);
			nq = q;
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flag & P_PROFIL)
		stopprofclock(p);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p);
	p->p_fd = NULL;

	if(p->p_leader->p_peers) {
		q = p->p_leader;
		while(q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	KKASSERT(p->p_numposixlocks == 0);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Release upcalls associated with this process
	 */
	if (vm->vm_upcalls)
		upc_release(vm, lp);

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	if (p->p_vkernel) {
		vkernel_lwp_exit(lp);
		vkernel_exit(p);
	}

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	++vm->vm_exitingcnt;
	sysref_put(&vm->vm_sysref);

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
			 *
			 * NOTE: while waiting for the process group to exit
			 * it is possible that one of the processes in the
			 * group will revoke the tty, so the ttyclosesession()
			 * function will re-check sp->s_ttyvp.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				ttywait(sp->s_ttyp);
				ttyclosesession(sp, 1); /* also revoke */
			}
			/*
			 * Release the tty.  If someone has it open via
			 * /dev/tty then close it (since they no longer can
			 * once we've NULL'd it out).
			 */
			ttyclosesession(sp, 0);

			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	if (p->p_tracenode)
		ktrdestroy(&p->p_tracenode);
	p->p_traceflag = 0;
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 */
	proc_move_allproc_zombie(p);

	q = LIST_FIRST(&p->p_children);
	if (q)		/* only need this if any child is S_ZOMB */
		wakeup((caddr_t) initproc);
	for (; q != 0; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		LIST_REMOVE(q, p_sibling);
		LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
		q->p_pptr = initproc;
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			ksignal(q, SIGKILL);
		}
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	p->p_xstat = rv;
	calcru_proc(p, &p->p_ru);
	ruadd(&p->p_ru, &p->p_cru);

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, notify process 1 instead (and hope it will handle
	 * this situation).
	 */
	if (p->p_pptr->p_sigacts->ps_flag & PS_NOCLDWAIT) {
		struct proc *pp = p->p_pptr;
		proc_reparent(p, initproc);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup((caddr_t)pp);
	}

	if (p->p_sigparent && p->p_pptr != initproc) {
		ksignal(p->p_pptr, p->p_sigparent);
	} else {
		ksignal(p->p_pptr, SIGCHLD);
	}

	wakeup((caddr_t)p->p_pptr);
	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	plimit_free(p);

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can work in someone else.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 */
	lwp_exit(1);
}

void
lwp_exit(int masterexit)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * lwp_exit() may be called without setting LWP_WEXIT, so
	 * make sure it is set here.
	 */
	lp->lwp_flag |= LWP_WEXIT;

	/*
	 * Clean up any virtualization
	 */
	if (lp->lwp_vkernel)
		vkernel_lwp_exit(lp);

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);

	/*
	 * If we don't hold the process until the LWP is reaped wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */
	PHOLD(p);

	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 */
	if (masterexit == 0) {
		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		--p->p_nthreads;
		wakeup(&p->p_nthreads);
		LIST_INSERT_HEAD(&deadlwp_list[mycpuid], lp, u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[mycpuid], deadlwp_task[mycpuid]);
	} else {
		--p->p_nthreads;
	}
	cpu_lwp_exit();
}

/*
 * Wait until a lwp is completely dead.
 *
 * If the thread is still executing, which can't be waited upon,
 * return failure.  The caller is responsible for waiting a little
 * bit and checking again.
 *
 * Suggested use:
 * while (!lwp_wait(lp))
 *	tsleep(lp, 0, "lwpwait", 1);
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);

	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpwait1", 1);

	lwkt_wait_free(td);

	/*
	 * The lwp's thread may still be in the middle
	 * of switching away, we can't rip its stack out from
	 * under it until TDF_EXITING is set and both
	 * TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets
	 * preempted.
	 *
	 * YYY no wakeup occurs, so we simply return failure
	 * and let the caller deal with sleeping and calling
	 * us again.
	 */
	if ((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) !=
	    TDF_EXITING)
		return (0);

	return (1);
}

/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) ==
		 TDF_EXITING);

	PRELE(lp->lwp_proc);
	lp->lwp_proc = NULL;
	if (td != NULL) {
		td->td_proc = NULL;
		td->td_lwp = NULL;
		lp->lwp_thread = NULL;
		lwkt_free_thread(td);
	}
	zfree(lwp_zone, lp);
}

int
sys_wait4(struct wait_args *uap)
{
	struct rusage rusage;
	int error, status;

	error = kern_wait(uap->pid, uap->status ? &status : NULL,
	    uap->options, uap->rusage ? &rusage : NULL, &uap->sysmsg_fds[0]);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage)
		error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
	return (error);
}

/*
 * wait1()
 *
 * wait_args(int pid, int *status, int options, struct rusage *rusage)
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
	struct thread *td = curthread;
	struct lwp *lp;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	int nfound, error;

	if (pid == 0)
		pid = -q->p_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);
loop:
	/*
	 * Hack for backwards compatibility with badly written user code.
	 * Or perhaps we have to do this anyway, it is unclear. XXX
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This little
	 * two-line hack restores this effect.
	 */
	while (q->p_stat == SSTOP)
		tstop();

	nfound = 0;
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid)
			continue;

		/* This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 */
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);

			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			while (p->p_lock)
				tsleep(p, 0, "reap3", hz);

			/* scheduling hook for heuristic */
			/* XXX no lwp available, we need a different heuristic */
			/*
			p->p_usched->heuristic_exiting(td->td_lwp, deadlp);
			*/

			/* Take care of our return values. */
			*res = p->p_pid;
			if (status)
				*status = p->p_xstat;
			if (rusage)
				*rusage = p->p_ru;
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				return (0);
			}
			p->p_xstat = 0;
			ruadd(&q->p_cru, &p->p_ru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			if (p->p_args && --p->p_args->ar_ref == 0)
				FREE(p->p_args, M_PARGS);

			/*
			 * Finally finished with old proc entry.
			 * Unlink it from its process group and free it.
			 */
			proc_remove_zombie(p);
			leavepgrp(p);

			if (--p->p_sigacts->ps_refcnt == 0) {
				kfree(p->p_sigacts, M_SUBPROC);
				p->p_sigacts = NULL;
			}

			vm_waitproc(p);
			kfree(p, M_PROC);
			nprocs--;
			return (0);
		}
		if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
		    (p->p_flag & P_TRACED || options & WUNTRACED)) {
			p->p_flag |= P_WAITED;

			*res = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			if (rusage)
				bzero(rusage, sizeof(*rusage));
			return (0);
		}
		if (options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
			*res = p->p_pid;
			p->p_flag &= ~P_CONTINUED;

			if (status)
				*status = SIGCONT;
			return (0);
		}
	}
	if (nfound == 0)
		return (ECHILD);
	if (options & WNOHANG) {
		*res = 0;
		return (0);
	}
	error = tsleep((caddr_t)q, PCATCH, "wait", 0);
	if (error)
		return (error);
	goto loop;
}

/*
 * make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{

	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}

/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 *	Take the arguments given and put them onto the exit callout list;
 *	however, first make sure that it's not already there.
 *	Returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",
		    function);
#endif
	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
			return(1);
		}
	}
	return (0);
}

/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
	struct lwplist *lwplist = context;
	struct lwp *lp;

	while ((lp = LIST_FIRST(lwplist))) {
		LIST_REMOVE(lp, u.lwp_reap_entry);
		reaplwp(lp);
	}
}

static void
reaplwp(struct lwp *lp)
{
	while (lwp_wait(lp) == 0)
		tsleep(lp, 0, "lwpreap", 1);
	lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]), M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
	}
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);