/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

#include <machine/vmm.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
        exitlist_fn function;
        TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
static struct task *deadlwp_task[MAXCPU];
static struct lwplist deadlwp_list[MAXCPU];
static struct lwkt_token deadlwp_token[MAXCPU];

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
        exit1(W_EXITCODE(uap->rval, 0));
        /* NOTREACHED */
}
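
/*
 * Status-word packing: W_EXITCODE(ret, sig) encodes the wait(2) status
 * as ((ret) << 8 | (sig)), so sys_exit() always reports a termination
 * signal of 0.  For example, an exit status of 2 becomes
 * W_EXITCODE(2, 0) == 0x0200, from which WEXITSTATUS() recovers the 2
 * and WTERMSIG() recovers 0.
 */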

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 */
int
sys_extexit(struct extexit_args *uap)
{
        struct proc *p = curproc;
        int action, who;
        int error;

        action = EXTEXIT_ACTION(uap->how);
        who = EXTEXIT_WHO(uap->how);

        /* Check parameters before we might perform some action */
        switch (who) {
        case EXTEXIT_PROC:
        case EXTEXIT_LWP:
                break;
        default:
                return (EINVAL);
        }

        switch (action) {
        case EXTEXIT_SIMPLE:
                break;
        case EXTEXIT_SETINT:
                error = copyout(&uap->status, uap->addr, sizeof(uap->status));
                if (error)
                        return (error);
                break;
        default:
                return (EINVAL);
        }

        lwkt_gettoken(&p->p_token);

        switch (who) {
        case EXTEXIT_LWP:
                /*
                 * Be sure only to perform a simple lwp exit if there is at
                 * least one more lwp in the proc, which will call exit1()
                 * later, otherwise the proc will be an UNDEAD and not even a
                 * SZOMB!
                 */
                if (p->p_nthreads > 1) {
                        lwp_exit(0, NULL);	/* called w/ p_token held */
                        /* NOT REACHED */
                }
                /* else last lwp in proc:  do the real thing */
                /* FALLTHROUGH */
        default:	/* to help gcc */
        case EXTEXIT_PROC:
                lwkt_reltoken(&p->p_token);
                exit1(W_EXITCODE(uap->status, 0));
                /* NOTREACHED */
        }

        /* NOTREACHED */
        lwkt_reltoken(&p->p_token);	/* safety */
}
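
/*
 * Illustrative userland sketch (the prototype is assumed here to be
 * int extexit(int how, int status, void *addr), with the action and
 * 'who' fields or'd together into 'how', matching the decoding above):
 *
 *	static volatile int lwp_done;
 *	...
 *	extexit(EXTEXIT_LWP | EXTEXIT_SETINT, 1, (void *)&lwp_done);
 *
 * This stores 1 into lwp_done via the copyout() above and then exits
 * either just the calling lwp or the whole process, depending on how
 * many lwps remain.
 */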

/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 *
 * Caller must hold curproc->p_token
 */
int
killalllwps(int forexec)
{
        struct lwp *lp = curthread->td_lwp;
        struct proc *p = lp->lwp_proc;
        int fakestop;

        /*
         * Interlock against P_WEXIT.  Only one of the process's threads
         * is allowed to do the master exit.
         */
        if (p->p_flags & P_WEXIT)
                return (EALREADY);
        p->p_flags |= P_WEXIT;

        /*
         * Set temporary stopped state in case we are racing a coredump.
         * Otherwise the coredump may hang forever.
         */
        if (lp->lwp_mpflags & LWP_MP_WSTOP) {
                fakestop = 0;
        } else {
                atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
                ++p->p_nstopped;
                fakestop = 1;
                wakeup(&p->p_nstopped);
        }

        /*
         * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
         */
        atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
        if (p->p_nthreads > 1)
                killlwps(lp);

        /*
         * Undo temporary stopped state
         */
        if (fakestop) {
                atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
                --p->p_nstopped;
        }

        /*
         * If doing this for an exec, clean up the remaining thread
         * (us) for continuing operation after all the other threads
         * have been killed.
         */
        if (forexec) {
                atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
                p->p_flags &= ~P_WEXIT;
        }
        return(0);
}

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
        struct proc *p = lp->lwp_proc;
        struct lwp *tlp;

        /*
         * Kill the remaining LWPs.  We must send the signal before setting
         * LWP_MP_WEXIT.  The setting of WEXIT is optional but helps reduce
         * races.  tlp must be held across the call as it might block and
         * allow the target lwp to rip itself out from under our loop.
         */
        FOREACH_LWP_IN_PROC(tlp, p) {
                LWPHOLD(tlp);
                lwkt_gettoken(&tlp->lwp_token);
                if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
                        atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT);
                        lwpsignal(p, tlp, SIGKILL);
                }
                lwkt_reltoken(&tlp->lwp_token);
                LWPRELE(tlp);
        }

        /*
         * Wait for everything to clear out.  Also make sure any tstop()s
         * are signalled (we are holding p_token for the interlock).
         */
        wakeup(p);
        while (p->p_nthreads > 1)
                tsleep(&p->p_nthreads, 0, "killlwps", 0);
}
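
/*
 * Note: the p_nthreads wait loop above is paired with lwp_exit(),
 * which decrements p_nthreads and, for threaded processes, issues a
 * late wakeup(&p->p_nthreads) once only the master lwp remains; see
 * lwp_exit() further below.
 */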

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct lwp *lp = td->td_lwp;
        struct proc *q;
        struct proc *pp;
        struct proc *reproc;
        struct sysreaper *reap;
        struct vmspace *vm;
        struct vnode *vtmp;
        struct exitlist *ep;
        int error;

        lwkt_gettoken(&p->p_token);

        if (p->p_pid == 1) {
                kprintf("init died (signal %d, exit %d)\n",
                    WTERMSIG(rv), WEXITSTATUS(rv));
                panic("Going nowhere without my init!");
        }
        varsymset_clean(&p->p_varsymset);
        lockuninit(&p->p_varsymset.vx_lock);

        /*
         * Kill all lwps associated with the current process, return an
         * error if we race another thread trying to do the same thing
         * and lose the race.
         */
        error = killalllwps(0);
        if (error) {
                lwp_exit(0, NULL);
                /* NOT REACHED */
        }

        /* are we a task leader? */
        if (p == p->p_leader) {
                struct kill_args killArgs;
                killArgs.signum = SIGKILL;
                q = p->p_peers;
                while (q) {
                        killArgs.pid = q->p_pid;
                        /*
                         * The interface for kill is better
                         * than the internal signal
                         */
                        sys_kill(&killArgs);
                        q = q->p_peers;
                }
                while (p->p_peers)
                        tsleep((caddr_t)p, 0, "exit1", 0);
        }

#ifdef PGINPROF
        vmsizmon();
#endif
        STOPEVENT(p, S_EXIT, rv);
        p->p_flags |= P_POSTEXIT;	/* stop procfs stepping */

        /*
         * Check if any loadable modules need anything done at process exit.
         * e.g. SYSV IPC stuff
         * XXX what if one of these generates an error?
         */
        p->p_xstat = rv;
        EVENTHANDLER_INVOKE(process_exit, p);

        /*
         * XXX: imho, the eventhandler stuff is much cleaner than this.
         *	Maybe we should move everything to use eventhandler.
         */
        TAILQ_FOREACH(ep, &exit_list, next)
                (*ep->function)(td);

        if (p->p_flags & P_PROFIL)
                stopprofclock(p);

        SIGEMPTYSET(p->p_siglist);
        SIGEMPTYSET(lp->lwp_siglist);
        if (timevalisset(&p->p_realtimer.it_value))
                callout_stop_sync(&p->p_ithandle);

        /*
         * Reset any sigio structures pointing to us as a result of
         * F_SETOWN with our pid.
         */
        funsetownlst(&p->p_sigiolst);

        /*
         * Close open files and release open-file table.
         * This may block!
         */
        fdfree(p, NULL);

        if (p->p_leader->p_peers) {
                q = p->p_leader;
                while (q->p_peers != p)
                        q = q->p_peers;
                q->p_peers = p->p_peers;
                wakeup((caddr_t)p->p_leader);
        }

        /*
         * XXX Shutdown SYSV semaphores
         */
        semexit(p);

        KKASSERT(p->p_numposixlocks == 0);

        /* The next two chunks should probably be moved to vmspace_exit. */
        vm = p->p_vmspace;

        /*
         * Clean up data related to virtual kernel operation.  Clean up
         * any vkernel context related to the current lwp now so we can
         * destroy p_vkernel.
         */
        if (p->p_vkernel) {
                vkernel_lwp_exit(lp);
                vkernel_exit(p);
        }

        /*
         * Release the user portion of address space.  The exitbump prevents
         * the vmspace from being completely eradicated (using holdcnt).
         * This releases references to vnodes, which could cause I/O if the
         * file has been unlinked.  We need to do this early enough that
         * we can still sleep.
         *
         * We can't free the entire vmspace as the kernel stack may be mapped
         * within that space also.
         *
         * Processes sharing the same vmspace may exit in one order, and
         * get cleaned up by vmspace_exit() in a different order.  The
         * last exiting process to reach this point releases as much of
         * the environment as it can, and the last process cleaned up
         * by vmspace_exit() (which decrements exitingcnt) cleans up the
         * remainder.
         *
         * NOTE: Releasing p_token around this call is helpful if the
         *	 vmspace had a huge RSS.  Otherwise some other process
         *	 trying to do an allproc or other scan (like 'ps') may
         *	 stall for a long time.
         */
        lwkt_reltoken(&p->p_token);
        vmspace_relexit(vm);
        lwkt_gettoken(&p->p_token);
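
        /*
         * Pairing note: only the user portion of the address space is
         * released here; the vmspace itself stays held and is finally
         * torn down in kern_wait() via vmspace_exitfree() once the
         * zombie has been reaped.
         */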

        if (SESS_LEADER(p)) {
                struct session *sp = p->p_session;

                if (sp->s_ttyvp) {
                        /*
                         * We are the controlling process.  Signal the
                         * foreground process group, drain the controlling
                         * terminal, and revoke access to the controlling
                         * terminal.
                         *
                         * NOTE: while waiting for the process group to exit
                         * it is possible that one of the processes in the
                         * group will revoke the tty, so the ttyclosesession()
                         * function will re-check sp->s_ttyvp.
                         */
                        if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
                                if (sp->s_ttyp->t_pgrp)
                                        pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
                                ttywait(sp->s_ttyp);
                                ttyclosesession(sp, 1); /* also revoke */
                        }
                        /*
                         * Release the tty.  If someone has it open via
                         * /dev/tty then close it (since they no longer can
                         * once we've NULL'd it out).
                         */
                        ttyclosesession(sp, 0);

                        /*
                         * s_ttyp is not zero'd; we use this to indicate
                         * that the session once had a controlling terminal.
                         * (for logging and informational purposes)
                         */
                }
                sp->s_leader = NULL;
        }
        fixjobc(p, p->p_pgrp, 0);
        (void)acct_process(p);
#ifdef KTRACE
        /*
         * release trace file
         */
        if (p->p_tracenode)
                ktrdestroy(&p->p_tracenode);
        p->p_traceflag = 0;
#endif
        /*
         * Release reference to text vnode
         */
        if ((vtmp = p->p_textvp) != NULL) {
                p->p_textvp = NULL;
                vrele(vtmp);
        }

        /* Release namecache handle to text file */
        if (p->p_textnch.ncp)
                cache_drop(&p->p_textnch);

        /*
         * We have to handle PPWAIT here or proc_move_allproc_zombie()
         * will block on the PHOLD() the parent is doing.
         *
         * We are using the flag as an interlock so an atomic op is
         * necessary to synchronize with the parent's cpu.
         */
        if (p->p_flags & P_PPWAIT) {
                if (p->p_pptr && p->p_pptr->p_upmap)
                        atomic_add_int(&p->p_pptr->p_upmap->invfork, -1);
                atomic_clear_int(&p->p_flags, P_PPWAIT);
                wakeup(p->p_pptr);
        }

        /*
         * Move the process to the zombie list.  This will block
         * until the process p_lock count reaches 0.  The process will
         * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
         * which is called from cpu_proc_exit().
         *
         * Interlock against waiters using p_waitgen.  We increment
         * p_waitgen after completing the move of our process to the
         * zombie list.
         *
         * WARNING: pp becomes stale when we block, clear it now as a
         *	    reminder.
         */
        proc_move_allproc_zombie(p);
        pp = p->p_pptr;
        atomic_add_long(&pp->p_waitgen, 1);
        pp = NULL;

        /*
         * release controlled reaper for exit if we own it and return the
         * remaining reaper (the one for us), which we will drop after we
         * are done.
         */
        reap = reaper_exit(p);

        /*
         * Reparent all of this process's children to the init process or
         * to the designated reaper.  We must hold the reaper's p_token in
         * order to safely mess with p_children.
         *
         * We already hold p->p_token (to remove the children from our list).
         */
        reproc = NULL;
        q = LIST_FIRST(&p->p_children);
        if (q) {
                reproc = reaper_get(reap);
                lwkt_gettoken(&reproc->p_token);
                while ((q = LIST_FIRST(&p->p_children)) != NULL) {
                        PHOLD(q);
                        lwkt_gettoken(&q->p_token);
                        if (q != LIST_FIRST(&p->p_children)) {
                                lwkt_reltoken(&q->p_token);
                                PRELE(q);
                                continue;
                        }
                        LIST_REMOVE(q, p_sibling);
                        LIST_INSERT_HEAD(&reproc->p_children, q, p_sibling);
                        q->p_pptr = reproc;
                        q->p_sigparent = SIGCHLD;

                        /*
                         * Traced processes are killed
                         * since their existence means someone is screwing up.
                         */
                        if (q->p_flags & P_TRACED) {
                                q->p_flags &= ~P_TRACED;
                                ksignal(q, SIGKILL);
                        }
                        lwkt_reltoken(&q->p_token);
                        PRELE(q);
                }
                lwkt_reltoken(&reproc->p_token);
                wakeup(reproc);
        }

        /*
         * Save exit status and final rusage info, adding in child rusage
         * info and self times.
         */
        calcru_proc(p, &p->p_ru);
        ruadd(&p->p_ru, &p->p_cru);

        /*
         * notify interested parties of our demise.
         */
        KNOTE(&p->p_klist, NOTE_EXIT);

        /*
         * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
         * flag set, or if the handler is set to SIG_IGN, notify the reaper
         * instead (it will handle this situation).
         *
         * NOTE: The reaper can still be the parent process.
         *
         * (must reload pp)
         */
        if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
                if (reproc == NULL)
                        reproc = reaper_get(reap);
                proc_reparent(p, reproc);
        }
        if (reproc)
                PRELE(reproc);
        if (reap)
                reaper_drop(reap);

        /*
         * Signal (possibly new) parent.
         */
        pp = p->p_pptr;
        PHOLD(pp);
        if (p->p_sigparent && pp != initproc) {
                int sig = p->p_sigparent;

                if (sig != SIGUSR1 && sig != SIGCHLD)
                        sig = SIGCHLD;
                ksignal(pp, sig);
        } else {
                ksignal(pp, SIGCHLD);
        }
        p->p_flags &= ~P_TRACED;
        PRELE(pp);

        /*
         * cpu_exit is responsible for clearing curproc, since
         * it is heavily integrated with the thread/switching sequence.
         *
         * Other substructures are freed from wait().
         */
        plimit_free(p);

        /*
         * Finally, call machine-dependent code to release as many of the
         * lwp's resources as we can and halt execution of this thread.
         *
         * pp is a wild pointer now but still the correct wakeup() target.
         * lwp_exit() only uses it to send the wakeup() signal to the likely
         * parent.  Any reparenting race that occurs will get a signal
         * automatically and not be an issue.
         */
        lwp_exit(1, pp);
}
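
/*
 * Usage note: exit1() hands the final lwp to lwp_exit(1, pp) (master
 * exit), while lwps terminating individually call lwp_exit(0, NULL).
 * The masterexit argument selects whether the lwp is queued to the
 * per-cpu deadlwp reaper or left on p_lwps for synchronous reaping.
 */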

/*
 * Eventually called by every exiting LWP
 *
 * p->p_token must be held.  mplock may be held and will be released.
 */
void
lwp_exit(int masterexit, void *waddr)
{
        struct thread *td = curthread;
        struct lwp *lp = td->td_lwp;
        struct proc *p = lp->lwp_proc;
        int dowake = 0;

        /*
         * Release the current user process designation on the process so
         * the userland scheduler can schedule someone else in.
         */
        p->p_usched->release_curproc(lp);

        /*
         * lwp_exit() may be called without setting LWP_MP_WEXIT, so
         * make sure it is set here.
         */
        ASSERT_LWKT_TOKEN_HELD(&p->p_token);
        atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);

        /*
         * Clean up any virtualization
         */
        if (lp->lwp_vkernel)
                vkernel_lwp_exit(lp);

        if (td->td_vmm)
                vmm_vmdestroy();

        /*
         * Clean up select/poll support
         */
        kqueue_terminate(&lp->lwp_kqueue);

        /*
         * Clean up any syscall-cached ucred
         */
        if (td->td_ucred) {
                crfree(td->td_ucred);
                td->td_ucred = NULL;
        }

        /*
         * Nobody actually wakes us when the lock
         * count reaches zero, so just wait one tick.
         */
        while (lp->lwp_lock > 0)
                tsleep(lp, 0, "lwpexit", 1);

        /* Hand down resource usage to our proc */
        ruadd(&p->p_ru, &lp->lwp_ru);

        /*
         * If we don't hold the process until the LWP is reaped wait*()
         * may try to dispose of its vmspace before all the LWPs have
         * actually terminated.
         */
        PHOLD(p);

        /*
         * Do any remaining work that might block on us.  We should be
         * coded such that further blocking is ok after decrementing
         * p_nthreads but don't take the chance.
         */
        dsched_exit_thread(td);
        biosched_done(curthread);

        /*
         * We have to use the reaper for all the LWPs except the one doing
         * the master exit.  The LWP doing the master exit can just be
         * left on p_lwps and the process reaper will deal with it
         * synchronously, which is much faster.
         *
         * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
         *
         * The process is left held until the reaper calls lwp_dispose() on
         * the lp (after calling lwp_wait()).
         */
        if (masterexit == 0) {
                int cpu = mycpuid;

                lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
                --p->p_nthreads;
                if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
                        dowake = 1;
                lwkt_gettoken(&deadlwp_token[cpu]);
                LIST_INSERT_HEAD(&deadlwp_list[cpu], lp, u.lwp_reap_entry);
                taskqueue_enqueue(taskqueue_thread[cpu], deadlwp_task[cpu]);
                lwkt_reltoken(&deadlwp_token[cpu]);
        } else {
                --p->p_nthreads;
                if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
                        dowake = 1;
        }

        /*
         * We no longer need p_token.
         *
         * Tell the userland scheduler that we are going away
         */
        lwkt_reltoken(&p->p_token);
        p->p_usched->heuristic_exiting(lp, p);

        /*
         * Issue late wakeups after releasing our token to give us a chance
         * to deschedule and switch away before another cpu in a wait*()
         * reaps us.  This is done as late as possible to reduce contention.
         */
        if (dowake)
                wakeup(&p->p_nthreads);
        if (waddr)
                wakeup(waddr);

        cpu_lwp_exit();
}

/*
 * Wait until a lwp is completely dead.  The final interlock in this drama
 * is when TDF_EXITING is set in cpu_thread_exit() just before the final
 * switchout.
 *
 * At the point TDF_EXITING is set a complete exit is accomplished when
 * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear.  td_mpflags has two
 * post-switch interlock flags that can be used to wait for the TDF_
 * flags to clear.
 *
 * Returns non-zero on success, and zero if the caller needs to retry
 * the lwp_wait().
 */
static int
lwp_wait(struct lwp *lp)
{
        struct thread *td = lp->lwp_thread;
        u_int mpflags;

        KKASSERT(lwkt_preempted_proc() != lp);

        /*
         * This bit of code uses the thread destruction interlock
         * managed by lwkt_switch_return() to wait for the lwp's
         * thread to completely disengage.
         *
         * It is possible for us to race another cpu core so we
         * have to do this correctly.
         */
        for (;;) {
                mpflags = td->td_mpflags;
                cpu_ccfence();
                if (mpflags & TDF_MP_EXITSIG)
                        break;
                tsleep_interlock(td, 0);
                if (atomic_cmpset_int(&td->td_mpflags, mpflags,
                                      mpflags | TDF_MP_EXITWAIT)) {
                        tsleep(td, PINTERLOCKED, "lwpxt", 0);
                }
        }
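
        /*
         * The loop above is the standard tsleep_interlock()/PINTERLOCKED
         * pattern: the sleep is registered before TDF_MP_EXITWAIT is
         * set, so a wakeup issued by the exiting thread between our
         * flag test and the tsleep() is not lost, and a failed cmpset
         * (td_mpflags changed underneath us) simply retries the loop.
         */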

        /*
         * We've already waited for the core exit but there can still
         * be other refs from e.g. process scans and such.
         */
        if (lp->lwp_lock > 0) {
                tsleep(lp, 0, "lwpwait1", 1);
                return(0);
        }
        if (td->td_refs) {
                tsleep(td, 0, "lwpwait2", 1);
                return(0);
        }

        /*
         * Now that we have the thread destruction interlock these flags
         * really should already be cleaned up, but keep a check for safety.
         *
         * We can't rip its stack out from under it until TDF_EXITING is
         * set and both TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
         * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
         * will be cleared temporarily if a thread gets preempted.
         */
        while ((td->td_flags & (TDF_RUNNING |
                                TDF_RUNQ |
                                TDF_PREEMPT_LOCK |
                                TDF_EXITING)) != TDF_EXITING) {
                tsleep(lp, 0, "lwpwait3", 1);
                return (0);
        }

        KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0,
                ("lwp_wait: td %p (%s) still on run or sleep queue",
                 td, td->td_comm));
        return (1);
}

/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
        struct thread *td = lp->lwp_thread;

        KKASSERT(lwkt_preempted_proc() != lp);
        KKASSERT(lp->lwp_lock == 0);
        KKASSERT(td->td_refs == 0);
        KKASSERT((td->td_flags & (TDF_RUNNING |
                                  TDF_RUNQ |
                                  TDF_PREEMPT_LOCK |
                                  TDF_EXITING)) == TDF_EXITING);

        PRELE(lp->lwp_proc);
        lp->lwp_proc = NULL;
        if (td != NULL) {
                td->td_proc = NULL;
                td->td_lwp = NULL;
                lp->lwp_thread = NULL;
                lwkt_free_thread(td);
        }
        kfree(lp, M_LWP);
}

int
sys_wait4(struct wait_args *uap)
{
        struct rusage rusage;
        int error, status;

        error = kern_wait(uap->pid, (uap->status ? &status : NULL),
                          uap->options, (uap->rusage ? &rusage : NULL),
                          &uap->sysmsg_result);

        if (error == 0 && uap->status)
                error = copyout(&status, uap->status, sizeof(*uap->status));
        if (error == 0 && uap->rusage)
                error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
        return (error);
}
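
/*
 * pid selection for kern_wait() below (standard wait4() semantics,
 * matching the checks in the child scan): pid > 0 waits for that
 * specific process, pid == 0 for members of the caller's process group
 * (converted to -p_pgid on entry), pid < -1 for members of process
 * group -pid, and pid == WAIT_ANY (-1) for any child.
 */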

/*
 * wait1()
 *
 * wait_args(int pid, int *status, int options, struct rusage *rusage)
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
        struct thread *td = curthread;
        struct lwp *lp;
        struct proc *q = td->td_proc;
        struct proc *p, *t;
        struct pargs *pa;
        struct sigacts *ps;
        int nfound, error;
        long waitgen;

        if (pid == 0)
                pid = -q->p_pgid;
        if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
                return (EINVAL);

        /*
         * Protect the q->p_children list
         */
        lwkt_gettoken(&q->p_token);
loop:
        /*
         * All sorts of things can change due to blocking so we have to loop
         * all the way back up here.
         *
         * The problem is that if a process group is stopped and the parent
         * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
         * of the child and then stop itself when it tries to return from the
         * system call.  When the process group is resumed the parent will
         * then get the STOP status even though the child has now resumed
         * (a followup wait*() will get the CONT status).
         *
         * Previously the CONT would overwrite the STOP because the tstop
         * was handled within tsleep(), and the parent would only see
         * the CONT when both are stopped and continued together.  This little
         * two-line hack restores this effect.
         */
        if (STOPLWP(q, td->td_lwp))
                tstop();

        nfound = 0;

        /*
         * Loop on children.
         *
         * NOTE: We don't want to break q's p_token in the loop for the
         *	 case where no children are found or we risk breaking the
         *	 interlock between child and parent.
         */
        waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
        LIST_FOREACH(p, &q->p_children, p_sibling) {
                if (pid != WAIT_ANY &&
                    p->p_pid != pid && p->p_pgid != -pid) {
                        continue;
                }

                /*
                 * This special case handles a kthread spawned by linux_clone
                 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
                 * functions need to be able to distinguish between waiting
                 * on a process and waiting on a thread.  It is a thread if
                 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
                 * signifies we want to wait for threads and not processes.
                 */
                if ((p->p_sigparent != SIGCHLD) ^
                    ((options & WLINUXCLONE) != 0)) {
                        continue;
                }

                nfound++;
                if (p->p_stat == SZOMB) {
                        /*
                         * We may go into SZOMB with threads still present.
                         * We must wait for them to exit before we can reap
                         * the master thread, otherwise we may race reaping
                         * non-master threads.
                         *
                         * Only this routine can remove a process from
                         * the zombie list and destroy it, use PHOLDZOMB()
                         * to serialize us and loop if it blocks (interlocked
                         * by the parent's q->p_token).
                         *
                         * WARNING!  (p) can be invalid when PHOLDZOMB(p)
                         *	     returns non-zero.  Be sure not to
                         *	     mess with it.
                         */
                        if (PHOLDZOMB(p))
                                goto loop;
                        lwkt_gettoken(&p->p_token);
                        if (p->p_pptr != q) {
                                lwkt_reltoken(&p->p_token);
                                PRELEZOMB(p);
                                goto loop;
                        }
                        while (p->p_nthreads > 0) {
                                tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
                        }

                        /*
                         * Reap any LWPs left in p->p_lwps.  This is usually
                         * just the last LWP.  This must be done before
                         * we loop on p_lock since the lwps hold a ref on
                         * it as a vmspace interlock.
                         *
                         * Once that is accomplished p_nthreads had better
                         * be zero.
                         */
                        while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
                                /*
                                 * Make sure no one is using this lwp before
                                 * it is removed from the tree.  If we did
                                 * not wait here, iterating the lwp tree
                                 * while blocking would be broken.
                                 */
                                while (lp->lwp_lock > 0)
                                        tsleep(lp, 0, "zomblwp", 1);
                                lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
                                reaplwp(lp);
                        }
                        KKASSERT(p->p_nthreads == 0);
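
                        /*
                         * (reaplwp(), defined near the end of this file,
                         * loops on lwp_wait() until the thread passes its
                         * final switchout and then calls lwp_dispose().
                         * The per-cpu deadlwp tasks reap non-master lwps
                         * through the same path.)
                         */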

                        /*
                         * Don't do anything really bad until all references
                         * to the process go away.  This may include other
                         * LWPs which are still in the process of being
                         * reaped.  We can't just pull the rug out from under
                         * them because they may still be using the VM space.
                         *
                         * Certain kernel facilities such as /proc will also
                         * put a hold on the process for short periods of
                         * time.
                         */
                        PRELE(p);
                        PSTALL(p, "reap3", 0);

                        /* Take care of our return values. */
                        *res = p->p_pid;

                        if (status)
                                *status = p->p_xstat;
                        if (rusage)
                                *rusage = p->p_ru;

                        /*
                         * If we got the child via a ptrace 'attach',
                         * we need to give it back to the old parent.
                         */
                        if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
                                PHOLD(p);
                                p->p_oppid = 0;
                                proc_reparent(p, t);
                                ksignal(t, SIGCHLD);
                                wakeup((caddr_t)t);
                                error = 0;
                                PRELE(t);
                                lwkt_reltoken(&p->p_token);
                                PRELEZOMB(p);
                                goto done;
                        }

                        /*
                         * Unlink the proc from its process group so that
                         * the following operations won't lead to an
                         * inconsistent state for processes running down
                         * the zombie list.
                         */
                        proc_remove_zombie(p);
                        proc_userunmap(p);
                        lwkt_reltoken(&p->p_token);
                        leavepgrp(p);

                        p->p_xstat = 0;
                        ruadd(&q->p_cru, &p->p_ru);

                        /*
                         * Decrement the count of procs running with this uid.
                         */
                        chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

                        /*
                         * Free up credentials.
                         */
                        crfree(p->p_ucred);
                        p->p_ucred = NULL;

                        /*
                         * Remove unused arguments
                         */
                        pa = p->p_args;
                        p->p_args = NULL;
                        if (pa && refcount_release(&pa->ar_ref)) {
                                kfree(pa, M_PARGS);
                                pa = NULL;
                        }

                        ps = p->p_sigacts;
                        p->p_sigacts = NULL;
                        if (ps && refcount_release(&ps->ps_refcnt)) {
                                kfree(ps, M_SUBPROC);
                                ps = NULL;
                        }

                        /*
                         * Our exitingcount was incremented when the process
                         * became a zombie, now that the process has been
                         * removed from (almost) all lists we should be able
                         * to safely destroy its vmspace.  Wait for any current
                         * holders to go away (so the vmspace remains stable),
                         * then scrap it.
                         *
                         * NOTE: Releasing the parent process (q) p_token
                         *	 across the vmspace_exitfree() call is
                         *	 important here to reduce stalls on
                         *	 interactions with (q) (such as
                         *	 fork/exec/wait or 'ps').
                         */
                        PSTALL(p, "reap4", 0);
                        lwkt_reltoken(&q->p_token);
                        vmspace_exitfree(p);
                        lwkt_gettoken(&q->p_token);
                        PSTALL(p, "reap5", 0);

                        /*
                         * NOTE: We have to officially release ZOMB in order
                         *	 to ensure that a racing thread in kern_wait()
                         *	 which blocked on ZOMB is woken up.
                         */
                        PHOLD(p);
                        PRELEZOMB(p);
                        kfree(p, M_PROC);
                        atomic_add_int(&nprocs, -1);
                        error = 0;
                        goto done;
                }
                if ((p->p_stat == SSTOP || p->p_stat == SCORE) &&
                    (p->p_flags & P_WAITED) == 0 &&
                    ((p->p_flags & P_TRACED) || (options & WUNTRACED))) {
                        PHOLD(p);
                        lwkt_gettoken(&p->p_token);
                        if (p->p_pptr != q) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }
                        if ((p->p_stat != SSTOP && p->p_stat != SCORE) ||
                            (p->p_flags & P_WAITED) != 0 ||
                            ((p->p_flags & P_TRACED) == 0 &&
                             (options & WUNTRACED) == 0)) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }

                        p->p_flags |= P_WAITED;

                        *res = p->p_pid;
                        if (status)
                                *status = W_STOPCODE(p->p_xstat);
                        /* Zero rusage so we get something consistent. */
                        if (rusage)
                                bzero(rusage, sizeof(*rusage));
                        error = 0;
                        lwkt_reltoken(&p->p_token);
                        PRELE(p);
                        goto done;
                }
                if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) {
                        PHOLD(p);
                        lwkt_gettoken(&p->p_token);
                        if (p->p_pptr != q) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }
                        if ((p->p_flags & P_CONTINUED) == 0) {
                                lwkt_reltoken(&p->p_token);
                                PRELE(p);
                                goto loop;
                        }

                        *res = p->p_pid;
                        p->p_flags &= ~P_CONTINUED;

                        if (status)
                                *status = SIGCONT;
                        error = 0;
                        lwkt_reltoken(&p->p_token);
                        PRELE(p);
                        goto done;
                }
        }
        if (nfound == 0) {
                error = ECHILD;
                goto done;
        }
        if (options & WNOHANG) {
                *res = 0;
                error = 0;
                goto done;
        }
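
        /*
         * p_waitgen encoding (the upper bits are not inspected in this
         * file): the low 31 bits form a generation counter bumped by
         * child state changes (e.g. the atomic_add_long(..., 1) in
         * exit1()), while waiters sample the value with
         * atomic_fetchadd_long(..., 0x80000000), which leaves the low
         * bits untouched.  A changed generation below means a child
         * event may have raced our scan, so we rescan instead of
         * sleeping.
         */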

        /*
         * Wait for signal - interlocked using q->p_waitgen.
         */
        error = 0;
        while ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
                tsleep_interlock(q, PCATCH);
                waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
                if ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
                        error = tsleep(q, PCATCH | PINTERLOCKED, "wait", 0);
                        break;
                }
        }
        if (error) {
done:
                lwkt_reltoken(&q->p_token);
                return (error);
        }
        goto loop;
}

/*
 * Change child's parent process to parent.
 *
 * p_children/p_sibling requires the parent's token, and
 * changing pptr requires the child's token, so we have to
 * get three tokens to do this operation.  We also need to
 * hold pointers that might get ripped out from under us to
 * preserve structural integrity.
 *
 * It is possible to race another reparent or disconnect or other
 * similar operation.  We must retry when this situation occurs.
 * Once we successfully reparent the process we no longer care
 * about any races.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
        struct proc *opp;

        PHOLD(parent);
        while ((opp = child->p_pptr) != parent) {
                PHOLD(opp);
                lwkt_gettoken(&opp->p_token);
                lwkt_gettoken(&child->p_token);
                lwkt_gettoken(&parent->p_token);
                if (child->p_pptr != opp) {
                        lwkt_reltoken(&parent->p_token);
                        lwkt_reltoken(&child->p_token);
                        lwkt_reltoken(&opp->p_token);
                        PRELE(opp);
                        continue;
                }
                LIST_REMOVE(child, p_sibling);
                LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
                child->p_pptr = parent;
                lwkt_reltoken(&parent->p_token);
                lwkt_reltoken(&child->p_token);
                lwkt_reltoken(&opp->p_token);
                if (LIST_EMPTY(&opp->p_children))
                        wakeup(opp);
                PRELE(opp);
                break;
        }
        PRELE(parent);
}
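
/*
 * Illustrative registration of an exit callout (the hook name is
 * hypothetical; exitlist_fn receives the exiting thread, matching the
 * invocation in exit1()):
 *
 *	static void my_exit_hook(struct thread *td);
 *	...
 *	at_exit(my_exit_hook);		(returns 0 on success)
 *	...
 *	rm_at_exit(my_exit_hook);	(returns number removed, 0 or 1)
 */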

/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 *	Take the arguments given and put them onto the exit callout list,
 *	but first make sure that it's not already there.
 *	Returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
        struct exitlist *ep;

#ifdef INVARIANTS
        /* Be noisy if the programmer has lost track of things */
        if (rm_at_exit(function))
                kprintf("WARNING: exit callout entry (%p) already present\n",
                        function);
#endif
        ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
        if (ep == NULL)
                return (ENOMEM);
        ep->function = function;
        TAILQ_INSERT_TAIL(&exit_list, ep, next);
        return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(exitlist_fn function)
{
        struct exitlist *ep;

        TAILQ_FOREACH(ep, &exit_list, next) {
                if (ep->function == function) {
                        TAILQ_REMOVE(&exit_list, ep, next);
                        kfree(ep, M_ATEXIT);
                        return(1);
                }
        }
        return (0);
}

/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
        struct lwplist *lwplist = context;
        struct lwp *lp;
        int cpu = mycpuid;

        lwkt_gettoken(&deadlwp_token[cpu]);
        while ((lp = LIST_FIRST(lwplist))) {
                LIST_REMOVE(lp, u.lwp_reap_entry);
                reaplwp(lp);
        }
        lwkt_reltoken(&deadlwp_token[cpu]);
}

static void
reaplwp(struct lwp *lp)
{
        while (lwp_wait(lp) == 0)
                ;
        lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++) {
                lwkt_token_init(&deadlwp_token[cpu], "deadlwpl");
                LIST_INIT(&deadlwp_list[cpu]);
                deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
                                            M_DEVBUF, M_WAITOK);
                TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
        }
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);