/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 */

#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

#include <machine/vmm.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
static struct task *deadlwp_task[MAXCPU];
static struct lwplist deadlwp_list[MAXCPU];
static struct lwkt_token deadlwp_token[MAXCPU];

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
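
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * W_EXITCODE() packs an exit code and a termination signal into the
 * single status int that exit1() stores in p_xstat and that kern_wait()
 * later reports.  The assertions below show the round trip.  The
 * function name is hypothetical and the block is not compiled.
 */
#if 0
static void
wstatus_example(void)
{
	int rv = W_EXITCODE(5, 0);	/* normal exit, code 5, no signal */

	KKASSERT(WIFEXITED(rv));
	KKASSERT(WEXITSTATUS(rv) == 5);
	KKASSERT(WTERMSIG(rv) == 0);
}
#endif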

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 */
int
sys_extexit(struct extexit_args *uap)
{
	struct proc *p = curproc;
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;
	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	lwkt_gettoken(&p->p_token);

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * SZOMB!
		 */
		if (p->p_nthreads > 1) {
			lwp_exit(0, NULL);	/* called w/ p_token held */
			/* NOT REACHED */
		}
		/* else last lwp in proc:  do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		lwkt_reltoken(&p->p_token);
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
	lwkt_reltoken(&p->p_token);	/* safety */
}

/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 *
 * Caller must hold curproc->p_token
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	int fakestop;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flags & P_WEXIT)
		return (EALREADY);
	p->p_flags |= P_WEXIT;

	/*
	 * Set temporary stopped state in case we are racing a coredump.
	 * Otherwise the coredump may hang forever.
	 */
	if (lp->lwp_mpflags & LWP_MP_WSTOP) {
		fakestop = 0;
	} else {
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		++p->p_nstopped;
		fakestop = 1;
		wakeup(&p->p_nstopped);
	}

	/*
	 * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
	 */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * Undo temporary stopped state
	 */
	if (fakestop) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		--p->p_nstopped;
	}

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
		p->p_flags &= ~P_WEXIT;
	}
	return(0);
}

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_MP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		lwkt_gettoken(&tlp->lwp_token);
		if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
			lwpsignal(p, tlp, SIGKILL);
			atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT);
		}
		lwkt_reltoken(&tlp->lwp_token);
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.
	 */
	while (p->p_nthreads > 1)
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
}
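
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * how an exec-style path might use killalllwps(1) to single-thread the
 * process while leaving the current lwp reusable afterwards, per the
 * forexec contract documented above.  The function name is hypothetical
 * and the block is not compiled.
 */
#if 0
static int
single_thread_for_exec(void)
{
	struct proc *p = curproc;
	int error;

	lwkt_gettoken(&p->p_token);	/* required by killalllwps() */
	error = killalllwps(1);		/* forexec: WEXIT cleared on return */
	lwkt_reltoken(&p->p_token);

	return (error);			/* EALREADY if we lost the race */
}
#endif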

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q;
	struct proc *pp;
	struct proc *reproc;
	struct sysreaper *reap;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	lwkt_gettoken(&p->p_token);

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
			WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}
	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);

	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0, NULL);
		/* NOT REACHED */
	}

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;

		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while (q) {
			killArgs.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			sys_kill(&killArgs);
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	p->p_flags |= P_POSTEXIT;	/* stop procfs stepping */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	p->p_xstat = rv;
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * XXX: imho, the eventhandler stuff is much cleaner than this.
	 *	Maybe we should move everything to use eventhandler.
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flags & P_PROFIL)
		stopprofclock(p);

	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop_sync(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p, NULL);

	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	KKASSERT(p->p_numposixlocks == 0);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	if (p->p_vkernel) {
		vkernel_lwp_exit(lp);
		vkernel_exit(p);
	}

	/*
	 * Release the user portion of address space.  The exitbump prevents
	 * the vmspace from being completely eradicated (using holdcnt).
	 * This releases references to vnodes, which could cause I/O if the
	 * file has been unlinked.  We need to do this early enough that
	 * we can still sleep.
	 *
	 * We can't free the entire vmspace as the kernel stack may be mapped
	 * within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.
	 * The last exiting process to reach this point releases as much
	 * of the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	vmspace_relexit(vm);

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
			 *
			 * NOTE: while waiting for the process group to exit
			 * it is possible that one of the processes in the
			 * group will revoke the tty, so the ttyclosesession()
			 * function will re-check sp->s_ttyvp.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				ttywait(sp->s_ttyp);
				ttyclosesession(sp, 1); /* also revoke */
			}
			/*
			 * Release the tty.  If someone has it open via
			 * /dev/tty then close it (since they no longer can
			 * once we've NULL'd it out).
			 */
			ttyclosesession(sp, 0);

			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	if (p->p_tracenode)
		ktrdestroy(&p->p_tracenode);
	p->p_traceflag = 0;
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/* Release namecache handle to text file */
	if (p->p_textnch.ncp)
		cache_drop(&p->p_textnch);

	/*
	 * We have to handle PPWAIT here or proc_move_allproc_zombie()
	 * will block on the PHOLD() the parent is doing.
	 *
	 * We are using the flag as an interlock so an atomic op is
	 * necessary to synchronize with the parent's cpu.
	 */
	if (p->p_flags & P_PPWAIT) {
		if (p->p_pptr && p->p_pptr->p_upmap)
			p->p_pptr->p_upmap->invfork = 0;
		atomic_clear_int(&p->p_flags, P_PPWAIT);
		wakeup(p->p_pptr);
	}

	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 *
	 * Interlock against waiters using p_waitgen.  We increment
	 * p_waitgen after completing the move of our process to the
	 * zombie list.
	 *
	 * WARNING: pp becomes stale when we block, clear it now as a
	 *	    reminder.
	 */
	proc_move_allproc_zombie(p);
	pp = p->p_pptr;
	atomic_add_long(&pp->p_waitgen, 1);
	pp = NULL;

	/*
	 * release controlled reaper for exit if we own it and return the
	 * remaining reaper (the one for us), which we will drop after we
	 * are done.
	 */
	reap = reaper_exit(p);

	/*
	 * Reparent all of this process's children to the init process or
	 * to the designated reaper.  We must hold the reaper's p_token in
	 * order to safely mess with p_children.
	 *
	 * We already hold p->p_token (to remove the children from our list).
	 */
	reproc = NULL;
	q = LIST_FIRST(&p->p_children);
	if (q) {
		reproc = reaper_get(reap);
		lwkt_gettoken(&reproc->p_token);
		while ((q = LIST_FIRST(&p->p_children)) != NULL) {
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			if (q != LIST_FIRST(&p->p_children)) {
				lwkt_reltoken(&q->p_token);
				PRELE(q);
				continue;
			}
			LIST_REMOVE(q, p_sibling);
			LIST_INSERT_HEAD(&reproc->p_children, q, p_sibling);
			q->p_pptr = reproc;
			q->p_sigparent = SIGCHLD;

			/*
			 * Traced processes are killed since their existence
			 * means someone is screwing up.
			 */
			if (q->p_flags & P_TRACED) {
				q->p_flags &= ~P_TRACED;
				ksignal(q, SIGKILL);
			}
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
		lwkt_reltoken(&reproc->p_token);
		wakeup(reproc);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	calcru_proc(p, &p->p_ru);
	ruadd(&p->p_ru, &p->p_cru);

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify the reaper
	 * instead (it will handle this situation).
	 *
	 * NOTE: The reaper can still be the parent process.
	 *
	 * (must reload pp)
	 */
	if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		if (reproc == NULL)
			reproc = reaper_get(reap);
		proc_reparent(p, reproc);
	}
	if (reproc)
		PRELE(reproc);
	if (reap)
		reaper_drop(reap);

	/*
	 * Signal (possibly new) parent.
	 */
	pp = p->p_pptr;
	PHOLD(pp);
	if (p->p_sigparent && pp != initproc) {
		int sig = p->p_sigparent;

		if (sig != SIGUSR1 && sig != SIGCHLD)
			sig = SIGCHLD;
		ksignal(pp, sig);
	} else {
		ksignal(pp, SIGCHLD);
	}
	p->p_flags &= ~P_TRACED;
	PRELE(pp);

	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	plimit_free(p);

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 *
	 * pp is a wild pointer now but still the correct wakeup() target.
	 * lwp_exit() only uses it to send the wakeup() signal to the likely
	 * parent.  Any reparenting race that occurs will get a signal
	 * automatically and not be an issue.
	 */
	lwp_exit(1, pp);
}
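
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the hold/token/re-check idiom exit1() uses above when draining
 * p_children.  Acquiring the child's token can block, so the list head
 * must be re-tested before the child is unlinked.  The helper name is
 * hypothetical and the block is not compiled.
 */
#if 0
static void
drain_children(struct proc *p, struct proc *newparent)
{
	struct proc *q;

	while ((q = LIST_FIRST(&p->p_children)) != NULL) {
		PHOLD(q);
		lwkt_gettoken(&q->p_token);	/* may block */
		if (q == LIST_FIRST(&p->p_children)) {
			LIST_REMOVE(q, p_sibling);
			LIST_INSERT_HEAD(&newparent->p_children,
					 q, p_sibling);
			q->p_pptr = newparent;
		}
		lwkt_reltoken(&q->p_token);
		PRELE(q);
	}
}
#endif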

/*
 * Eventually called by every exiting LWP
 *
 * p->p_token must be held.  mplock may be held and will be released.
 */
void
lwp_exit(int masterexit, void *waddr)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = lp->lwp_proc;
	int dowake = 0;

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can schedule someone else in.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * lwp_exit() may be called without setting LWP_MP_WEXIT, so
	 * make sure it is set here.
	 */
	ASSERT_LWKT_TOKEN_HELD(&p->p_token);
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);

	/*
	 * Clean up any virtualization
	 */
	if (lp->lwp_vkernel)
		vkernel_lwp_exit(lp);

	if (td->td_vmm)
		vmm_vmdestroy();

	/*
	 * Clean up select/poll support
	 */
	kqueue_terminate(&lp->lwp_kqueue);

	/*
	 * Clean up any syscall-cached ucred
	 */
	if (td->td_ucred) {
		crfree(td->td_ucred);
		td->td_ucred = NULL;
	}

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);

	/*
	 * If we don't hold the process until the LWP is reaped wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */
	PHOLD(p);

	/*
	 * Do any remaining work that might block on us.  We should be
	 * coded such that further blocking is ok after decrementing
	 * p_nthreads but don't take the chance.
	 */
	dsched_exit_thread(td);
	biosched_done(curthread);

	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 *
	 * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
	 *
	 * The process is left held until the reaper calls lwp_dispose() on
	 * the lp (after calling lwp_wait()).
	 */
	if (masterexit == 0) {
		int cpu = mycpuid;

		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		--p->p_nthreads;
		if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
			dowake = 1;
		lwkt_gettoken(&deadlwp_token[cpu]);
		LIST_INSERT_HEAD(&deadlwp_list[cpu], lp, u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[cpu], deadlwp_task[cpu]);
		lwkt_reltoken(&deadlwp_token[cpu]);
	} else {
		--p->p_nthreads;
		if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
			dowake = 1;
	}

	/*
	 * We no longer need p_token.
	 *
	 * Tell the userland scheduler that we are going away
	 */
	lwkt_reltoken(&p->p_token);
	p->p_usched->heuristic_exiting(lp, p);

	/*
	 * Issue late wakeups after releasing our token to give us a chance
	 * to deschedule and switch away before another cpu in a wait*()
	 * reaps us.  This is done as late as possible to reduce contention.
	 */
	if (dowake)
		wakeup(&p->p_nthreads);
	if (waddr)
		wakeup(waddr);

	cpu_lwp_exit();
}
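
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the generic tsleep_interlock()/PINTERLOCKED idiom that lwp_wait()
 * below builds on.  The interlock is registered before the condition
 * is re-tested so a wakeup() issued in the window cannot be lost.  The
 * helper name is hypothetical and the block is not compiled.
 */
#if 0
static void
wait_for_bit(u_int *flagsp, u_int bit)
{
	u_int flags;

	for (;;) {
		flags = *flagsp;
		cpu_ccfence();
		if (flags & bit)
			break;
		tsleep_interlock(flagsp, 0);
		if (*flagsp == flags)
			tsleep(flagsp, PINTERLOCKED, "bitwt", 0);
	}
}
#endif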

/*
 * Wait until a lwp is completely dead.  The final interlock in this drama
 * is when TDF_EXITING is set in cpu_thread_exit() just before the final
 * switchout.
 *
 * At the point TDF_EXITING is set a complete exit is accomplished when
 * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear.  td_mpflags has two
 * post-switch interlock flags that can be used to wait for the TDF_
 * flags to clear.
 *
 * Returns non-zero on success, and zero if the caller needs to retry
 * the lwp_wait().
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	u_int mpflags;

	KKASSERT(lwkt_preempted_proc() != lp);

	/*
	 * This bit of code uses the thread destruction interlock
	 * managed by lwkt_switch_return() to wait for the lwp's
	 * thread to completely disengage.
	 *
	 * It is possible for us to race another cpu core so we
	 * have to do this correctly.
	 */
	for (;;) {
		mpflags = td->td_mpflags;
		cpu_ccfence();
		if (mpflags & TDF_MP_EXITSIG)
			break;
		tsleep_interlock(td, 0);
		if (atomic_cmpset_int(&td->td_mpflags, mpflags,
				      mpflags | TDF_MP_EXITWAIT)) {
			tsleep(td, PINTERLOCKED, "lwpxt", 0);
		}
	}

	/*
	 * We've already waited for the core exit but there can still
	 * be other refs from e.g. process scans and such.
	 */
	if (lp->lwp_lock > 0) {
		tsleep(lp, 0, "lwpwait1", 1);
		return(0);
	}
	if (td->td_refs) {
		tsleep(td, 0, "lwpwait2", 1);
		return(0);
	}

	/*
	 * Now that we have the thread destruction interlock these flags
	 * really should already be cleaned up, keep a check for safety.
	 *
	 * We can't rip its stack out from under it until TDF_EXITING is
	 * set and both TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets preempted.
	 */
	while ((td->td_flags & (TDF_RUNNING |
				TDF_RUNQ |
				TDF_PREEMPT_LOCK |
				TDF_EXITING)) != TDF_EXITING) {
		tsleep(lp, 0, "lwpwait3", 1);
		return (0);
	}

	KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0,
		("lwp_wait: td %p (%s) still on run or sleep queue",
		 td, td->td_comm));
	return (1);
}

/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(lp->lwp_lock == 0);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING |
				  TDF_RUNQ |
				  TDF_PREEMPT_LOCK |
				  TDF_EXITING)) == TDF_EXITING);

	PRELE(lp->lwp_proc);
	lp->lwp_proc = NULL;
	if (td != NULL) {
		td->td_proc = NULL;
		td->td_lwp = NULL;
		lp->lwp_thread = NULL;
		lwkt_free_thread(td);
	}
	kfree(lp, M_LWP);
}

int
sys_wait4(struct wait_args *uap)
{
	struct rusage rusage;
	int error, status;

	error = kern_wait(uap->pid, (uap->status ? &status : NULL),
			  uap->options, (uap->rusage ? &rusage : NULL),
			  &uap->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage)
		error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
	return (error);
}

/*
 * kern_wait()
 *
 *	wait_args(int pid, int *status, int options, struct rusage *rusage)
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
	struct thread *td = curthread;
	struct lwp *lp;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	struct pargs *pa;
	struct sigacts *ps;
	int nfound, error;
	long waitgen;

	if (pid == 0)
		pid = -q->p_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);

	/*
	 * Protect the q->p_children list
	 */
	lwkt_gettoken(&q->p_token);
loop:
	/*
	 * All sorts of things can change due to blocking so we have to loop
	 * all the way back up here.
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.
	 * When the process group is resumed the parent will then get
	 * the STOP status even though the child has now resumed (a
	 * followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This
	 * little two-line hack restores this effect.
	 */
	while (q->p_stat == SSTOP || q->p_stat == SCORE)
		tstop();

	nfound = 0;

	/*
	 * Loop on children.
	 *
	 * NOTE: We don't want to break q's p_token in the loop for the
	 *	 case where no children are found or we risk breaking the
	 *	 interlock between child and parent.
	 */
	waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid) {
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 *
			 * Only this routine can remove a process from
			 * the zombie list and destroy it, use PHOLDZOMB()
			 * to serialize us and loop if it blocks (interlocked
			 * by the parent's q->p_token).
			 *
			 * WARNING!  (p) can be invalid when PHOLDZOMB(p)
			 *	     returns non-zero.  Be sure not to
			 *	     mess with it.
			 */
			if (PHOLDZOMB(p))
				goto loop;
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				goto loop;
			}
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				/*
				 * Make sure no one is using this lwp, before
				 * it is removed from the tree.  If we didn't
				 * wait here, lwp tree iteration with a
				 * blocking operation would be broken.
				 */
				while (lp->lwp_lock > 0)
					tsleep(lp, 0, "zomblwp", 1);
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);

			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			PRELE(p);
			PSTALL(p, "reap3", 0);
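
			/*
			 * Editor's note: at this point we hold the zombie
			 * exclusively (PHOLDZOMB), all of its lwps have
			 * been reaped, and transient holds have drained,
			 * so the fields copied out below are stable.
			 */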

			/* Take care of our return values. */
			*res = p->p_pid;

			if (status)
				*status = p->p_xstat;
			if (rusage)
				*rusage = p->p_ru;

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				PHOLD(p);
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				error = 0;
				PRELE(t);
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				goto done;
			}

			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			proc_remove_zombie(p);
			proc_userunmap(p);
			lwkt_reltoken(&p->p_token);
			leavepgrp(p);

			p->p_xstat = 0;
			ruadd(&q->p_cru, &p->p_ru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			pa = p->p_args;
			p->p_args = NULL;
			if (pa && refcount_release(&pa->ar_ref)) {
				kfree(pa, M_PARGS);
				pa = NULL;
			}

			ps = p->p_sigacts;
			p->p_sigacts = NULL;
			if (ps && refcount_release(&ps->ps_refcnt)) {
				kfree(ps, M_SUBPROC);
				ps = NULL;
			}

			/*
			 * Our exitingcount was incremented when the process
			 * became a zombie, now that the process has been
			 * removed from (almost) all lists we should be able
			 * to safely destroy its vmspace.  Wait for any current
			 * holders to go away (so the vmspace remains stable),
			 * then scrap it.
			 */
			PSTALL(p, "reap4", 0);
			vmspace_exitfree(p);
			PSTALL(p, "reap5", 0);

			/*
			 * NOTE: We have to officially release ZOMB in order
			 *	 to ensure that a racing thread in kern_wait()
			 *	 which blocked on ZOMB is woken up.
			 */
			PHOLD(p);
			PRELEZOMB(p);
			kfree(p, M_PROC);
			atomic_add_int(&nprocs, -1);
			error = 0;
			goto done;
		}
		if ((p->p_stat == SSTOP || p->p_stat == SCORE) &&
		    (p->p_flags & P_WAITED) == 0 &&
		    ((p->p_flags & P_TRACED) || (options & WUNTRACED))) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if ((p->p_stat != SSTOP && p->p_stat != SCORE) ||
			    (p->p_flags & P_WAITED) != 0 ||
			    ((p->p_flags & P_TRACED) == 0 &&
			     (options & WUNTRACED) == 0)) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			p->p_flags |= P_WAITED;

			*res = p->p_pid;
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			if (rusage)
				bzero(rusage, sizeof(*rusage));
			error = 0;
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
		if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if ((p->p_flags & P_CONTINUED) == 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			*res = p->p_pid;
			p->p_flags &= ~P_CONTINUED;

			if (status)
				*status = SIGCONT;
			error = 0;
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
	}
	if (nfound == 0) {
		error = ECHILD;
		goto done;
	}
	if (options & WNOHANG) {
		*res = 0;
		error = 0;
		goto done;
	}

	/*
	 * Wait for signal - interlocked using q->p_waitgen.
	 */
	error = 0;
	while ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
		tsleep_interlock(q, PCATCH);
		waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
		if ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
			error = tsleep(q, PCATCH | PINTERLOCKED, "wait", 0);
			break;
		}
	}
	if (error) {
done:
		lwkt_reltoken(&q->p_token);
		return (error);
	}
	goto loop;
}

/*
 * Change child's parent process to parent.
 *
 * p_children/p_sibling requires the parent's token, and
 * changing pptr requires the child's token, so we have to
 * get three tokens to do this operation.  We also need to
 * hold pointers that might get ripped out from under us to
 * preserve structural integrity.
 *
 * It is possible to race another reparent or disconnect or other
 * similar operation.  We must retry when this situation occurs.
 * Once we successfully reparent the process we no longer care
 * about any races.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	struct proc *opp;

	PHOLD(parent);
	while ((opp = child->p_pptr) != parent) {
		PHOLD(opp);
		lwkt_gettoken(&opp->p_token);
		lwkt_gettoken(&child->p_token);
		lwkt_gettoken(&parent->p_token);
		if (child->p_pptr != opp) {
			lwkt_reltoken(&parent->p_token);
			lwkt_reltoken(&child->p_token);
			lwkt_reltoken(&opp->p_token);
			PRELE(opp);
			continue;
		}
		LIST_REMOVE(child, p_sibling);
		LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
		child->p_pptr = parent;
		lwkt_reltoken(&parent->p_token);
		lwkt_reltoken(&child->p_token);
		lwkt_reltoken(&opp->p_token);
		if (LIST_EMPTY(&opp->p_children))
			wakeup(opp);
		PRELE(opp);
		break;
	}
	PRELE(parent);
}
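
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the p_waitgen handshake used by exit1() and kern_wait() above.  The
 * exiting child bumps the generation count after it reaches the zombie
 * list; the parent samples the count before rescanning its children
 * and only sleeps while the count is unchanged, so a concurrent exit
 * cannot be missed.  The helper name is hypothetical and the block is
 * not compiled.
 */
#if 0
static int
waitgen_sleep(struct proc *q, long waitgen)
{
	tsleep_interlock(q, PCATCH);
	waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
	if ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF))
		return (tsleep(q, PCATCH | PINTERLOCKED, "wait", 0));
	return (0);			/* generation changed, rescan */
}
#endif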
1249 */ 1250 1251 int 1252 at_exit(exitlist_fn function) 1253 { 1254 struct exitlist *ep; 1255 1256 #ifdef INVARIANTS 1257 /* Be noisy if the programmer has lost track of things */ 1258 if (rm_at_exit(function)) 1259 kprintf("WARNING: exit callout entry (%p) already present\n", 1260 function); 1261 #endif 1262 ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT); 1263 if (ep == NULL) 1264 return (ENOMEM); 1265 ep->function = function; 1266 TAILQ_INSERT_TAIL(&exit_list, ep, next); 1267 return (0); 1268 } 1269 1270 /* 1271 * Scan the exit callout list for the given item and remove it. 1272 * Returns the number of items removed (0 or 1) 1273 */ 1274 int 1275 rm_at_exit(exitlist_fn function) 1276 { 1277 struct exitlist *ep; 1278 1279 TAILQ_FOREACH(ep, &exit_list, next) { 1280 if (ep->function == function) { 1281 TAILQ_REMOVE(&exit_list, ep, next); 1282 kfree(ep, M_ATEXIT); 1283 return(1); 1284 } 1285 } 1286 return (0); 1287 } 1288 1289 /* 1290 * LWP reaper related code. 1291 */ 1292 static void 1293 reaplwps(void *context, int dummy) 1294 { 1295 struct lwplist *lwplist = context; 1296 struct lwp *lp; 1297 int cpu = mycpuid; 1298 1299 lwkt_gettoken(&deadlwp_token[cpu]); 1300 while ((lp = LIST_FIRST(lwplist))) { 1301 LIST_REMOVE(lp, u.lwp_reap_entry); 1302 reaplwp(lp); 1303 } 1304 lwkt_reltoken(&deadlwp_token[cpu]); 1305 } 1306 1307 static void 1308 reaplwp(struct lwp *lp) 1309 { 1310 while (lwp_wait(lp) == 0) 1311 ; 1312 lwp_dispose(lp); 1313 } 1314 1315 static void 1316 deadlwp_init(void) 1317 { 1318 int cpu; 1319 1320 for (cpu = 0; cpu < ncpus; cpu++) { 1321 lwkt_token_init(&deadlwp_token[cpu], "deadlwpl"); 1322 LIST_INIT(&deadlwp_list[cpu]); 1323 deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]), 1324 M_DEVBUF, M_WAITOK); 1325 TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]); 1326 } 1327 } 1328 1329 SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL); 1330