/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmsg.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
static struct task *deadlwp_task[MAXCPU];
static struct lwplist deadlwp_list[MAXCPU];
static struct lwkt_token deadlwp_token[MAXCPU];

void (*linux_task_drop_callback)(thread_t td);
void (*linux_proc_drop_callback)(struct proc *p);

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct sysmsg *sysmsg, const struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
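
/*
 * NOTE (informational): on BSD-derived systems W_EXITCODE(ret, sig)
 * packs the wait status word as roughly ((ret << 8) | sig), so a plain
 * exit carries signal number 0 and wait*() consumers decode it with
 * WEXITSTATUS() / WTERMSIG().  The authoritative definition lives in
 * <sys/wait.h>.
 */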

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 */
int
sys_extexit(struct sysmsg *sysmsg, const struct extexit_args *uap)
{
	struct proc *p = curproc;
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;
	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	lwkt_gettoken(&p->p_token);

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even
		 * a SZOMB!
		 */
		if (p->p_nthreads > 1) {
			lwp_exit(0, NULL);	/* called w/ p_token held */
			/* NOT REACHED */
		}
		/* else last lwp in proc: do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		lwkt_reltoken(&p->p_token);
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
	lwkt_reltoken(&p->p_token);	/* safety */
}
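
/*
 * Example (sketch, not from this file): a userland thread runtime can
 * terminate one lwp while publishing a status word for its joiner:
 *
 *	extexit(EXTEXIT_LWP | EXTEXIT_SETINT, code, &joiner_visible_word);
 *
 * The flag names exist in <sys/unistd.h>; how they combine into the
 * 'how' argument is illustrative here, see EXTEXIT_ACTION() /
 * EXTEXIT_WHO() above for the authoritative decoding.
 */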

/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	int fakestop;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	lwkt_gettoken(&p->p_token);
	if (p->p_flags & P_WEXIT) {
		lwkt_reltoken(&p->p_token);
		return (EALREADY);
	}
	p->p_flags |= P_WEXIT;
	lwkt_gettoken(&lp->lwp_token);

	/*
	 * Set a temporary stopped state in case we are racing a coredump.
	 * Otherwise the coredump may hang forever.
	 */
	if (lp->lwp_mpflags & LWP_MP_WSTOP) {
		fakestop = 0;
	} else {
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		++p->p_nstopped;
		fakestop = 1;
		wakeup(&p->p_nstopped);
	}

	/*
	 * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
	 */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * Undo the temporary stopped state
	 */
	if (fakestop && (lp->lwp_mpflags & LWP_MP_WSTOP)) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		--p->p_nstopped;
	}

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
		p->p_flags &= ~P_WEXIT;
	}
	lwkt_reltoken(&lp->lwp_token);
	lwkt_reltoken(&p->p_token);

	return (0);
}

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_MP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		lwkt_gettoken(&tlp->lwp_token);
		if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
			atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT);
			lwpsignal(p, tlp, SIGKILL);
		}
		lwkt_reltoken(&tlp->lwp_token);
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.  Also make sure any tstop()s
	 * are signalled (we are holding p_token for the interlock).
	 */
	wakeup(p);
	while (p->p_nthreads > 1)
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
}
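
/*
 * NOTE: only the first thread through killalllwps() wins the P_WEXIT
 * interlock; losers get EALREADY and are expected to terminate only
 * themselves via lwp_exit(), which is exactly how exit1() below reacts
 * to the error.
 */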

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan
 * them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q;
	struct proc *pp;
	struct proc *reproc;
	struct sysreaper *reap;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	lwkt_gettoken(&p->p_token);

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
			WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}
	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);

	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0, NULL);
		/* NOT REACHED */
	}

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct sysmsg sysmsg;

		sysmsg.extargs.kill.signum = SIGKILL;
		q = p->p_peers;
		while (q) {
			sysmsg.extargs.kill.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			sys_kill(&sysmsg, &sysmsg.extargs.kill);
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	p->p_flags |= P_POSTEXIT;	/* stop procfs stepping */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	p->p_xstat = rv;

	/*
	 * XXX: imho, the eventhandler stuff is much cleaner than this.
	 *	Maybe we should move everything to use eventhandler.
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flags & P_PROFIL)
		stopprofclock(p);

	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_terminate(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p, NULL);

	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	if (p->p_vkernel) {
		vkernel_lwp_exit(lp);
		vkernel_exit(p);
	}

	/*
	 * Release the user portion of address space.  The exitbump prevents
	 * the vmspace from being completely eradicated (using holdcnt).
	 * This releases references to vnodes, which could cause I/O if the
	 * file has been unlinked.  We need to do this early enough that
	 * we can still sleep.
	 *
	 * We can't free the entire vmspace as the kernel stack may be mapped
	 * within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 *
	 * NOTE: Releasing p_token around this call is helpful if the
	 *	 vmspace had a huge RSS.  Otherwise some other process
	 *	 trying to do an allproc or other scan (like 'ps') may
	 *	 stall for a long time.
	 */
	lwkt_reltoken(&p->p_token);
	vmspace_relexit(vm);
	lwkt_gettoken(&p->p_token);

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
			 *
			 * NOTE: while waiting for the process group to exit
			 * it is possible that one of the processes in the
			 * group will revoke the tty, so the ttyclosesession()
			 * function will re-check sp->s_ttyvp.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				ttywait(sp->s_ttyp);
				ttyclosesession(sp, 1);	/* also revoke */
			}
			/*
			 * Release the tty.  If someone has it open via
			 * /dev/tty then close it (since they no longer can
			 * once we've NULL'd it out).
			 */
			ttyclosesession(sp, 0);

			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	if (p->p_tracenode)
		ktrdestroy(&p->p_tracenode);
	p->p_traceflag = 0;
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/* Release namecache handle to text file */
	if (p->p_textnch.ncp)
		cache_drop(&p->p_textnch);

	/*
	 * We have to handle PPWAIT here or proc_move_allproc_zombie()
	 * will block on the PHOLD() the parent is doing.
	 *
	 * We are using the flag as an interlock so an atomic op is
	 * necessary to synchronize with the parent's cpu.
	 */
	if (p->p_flags & P_PPWAIT) {
		if (p->p_pptr && p->p_pptr->p_upmap)
			atomic_add_int(&p->p_pptr->p_upmap->invfork, -1);
		atomic_clear_int(&p->p_flags, P_PPWAIT);
		wakeup(p->p_pptr);
	}

	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 *
	 * Interlock against waiters using p_waitgen.  We increment
	 * p_waitgen after completing the move of our process to the
	 * zombie list.
	 *
	 * WARNING: pp becomes stale when we block, clear it now as a
	 *	    reminder.
	 */
	proc_move_allproc_zombie(p);
	pp = p->p_pptr;
	atomic_add_long(&pp->p_waitgen, 1);
	pp = NULL;

	/*
	 * Release the controlled reaper for exit if we own it and return
	 * the remaining reaper (the one for us), which we will drop after
	 * we are done.
	 */
	reap = reaper_exit(p);
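
	/*
	 * NOTE: the "reaper" above is DragonFly's sub-process reaper
	 * facility (managed via procctl(2)): when a process registered
	 * as a reaper exits, its duties are released here, and our own
	 * children below are handed to the nearest remaining reaper in
	 * the chain rather than always to init.
	 */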

	/*
	 * Reparent all of this process's children to the init process or
	 * to the designated reaper.  We must hold the reaper's p_token in
	 * order to safely mess with p_children.
	 *
	 * Issue the p_deathsig signal to children that request it.
	 *
	 * We already hold p->p_token (to remove the children from our list).
	 */
	reproc = NULL;
	q = LIST_FIRST(&p->p_children);
	if (q) {
		reproc = reaper_get(reap);
		lwkt_gettoken(&reproc->p_token);
		while ((q = LIST_FIRST(&p->p_children)) != NULL) {
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
			if (q != LIST_FIRST(&p->p_children)) {
				lwkt_reltoken(&q->p_token);
				PRELE(q);
				continue;
			}
			LIST_REMOVE(q, p_sibling);
			LIST_INSERT_HEAD(&reproc->p_children, q, p_sibling);
			q->p_pptr = reproc;
			q->p_ppid = reproc->p_pid;
			q->p_sigparent = SIGCHLD;

			/*
			 * Traced processes are killed since their
			 * existence means someone is screwing up.
			 */
			if (q->p_flags & P_TRACED) {
				q->p_flags &= ~P_TRACED;
				ksignal(q, SIGKILL);
			}

			/*
			 * Issue p_deathsig to children that request it
			 */
			if (q->p_deathsig)
				ksignal(q, q->p_deathsig);
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
		lwkt_reltoken(&reproc->p_token);
		wakeup(reproc);
	}

	/*
	 * Save exit status and final rusage info.  We no longer add
	 * child rusage info into self times; wait4() and kern_wait()
	 * handle it in order to properly support wait6().
	 */
	calcru_proc(p, &p->p_ru);
	/* ruadd(&p->p_ru, &p->p_cru); REMOVED */

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify the reaper
	 * instead (it will handle this situation).
	 *
	 * NOTE: The reaper can still be the parent process.
	 *
	 * (must reload pp)
	 */
	if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		if (reproc == NULL)
			reproc = reaper_get(reap);
		proc_reparent(p, reproc);
	}
	if (reproc)
		PRELE(reproc);
	if (reap)
		reaper_drop(reap);

	/*
	 * Signal (possibly new) parent.
	 */
	pp = p->p_pptr;
	PHOLD(pp);
	if (p->p_sigparent && pp != initproc) {
		int sig = p->p_sigparent;

		if (sig != SIGUSR1 && sig != SIGCHLD)
			sig = SIGCHLD;
		ksignal(pp, sig);
	} else {
		ksignal(pp, SIGCHLD);
	}
	p->p_flags &= ~P_TRACED;
	PRELE(pp);

	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	if (p->p_limit) {
		struct plimit *rlimit;

		rlimit = p->p_limit;
		p->p_limit = NULL;
		plimit_free(rlimit);
	}

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 *
	 * pp is a wild pointer now but still the correct wakeup() target.
	 * lwp_exit() only uses it to send the wakeup() signal to the likely
	 * parent.  Any reparenting race that occurs will get a signal
	 * automatically and not be an issue.
	 */
	lwp_exit(1, pp);
}
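
/*
 * NOTE: a non-default p_sigparent is how emulation layers (e.g. the
 * linux_clone() support referenced in kern_wait() below) request an
 * alternate death signal; exit1() above deliberately restricts it to
 * SIGUSR1 or SIGCHLD before notifying the parent.
 */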

/*
 * Eventually called by every exiting LWP.
 *
 * p->p_token must be held.  mplock may be held and will be released.
 */
void
lwp_exit(int masterexit, void *waddr)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = lp->lwp_proc;
	int dowake = 0;

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can schedule someone else in our place.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * Destroy the per-thread shared page and remove it from any pmaps
	 * it resides in.
	 */
	lwp_userunmap(lp);

	/*
	 * lwp_exit() may be called without setting LWP_MP_WEXIT, so
	 * make sure it is set here.
	 */
	ASSERT_LWKT_TOKEN_HELD(&p->p_token);
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);

	/*
	 * Clean up any virtualization
	 */
	if (lp->lwp_vkernel)
		vkernel_lwp_exit(lp);

	/*
	 * Clean up select/poll support
	 */
	kqueue_terminate(&lp->lwp_kqueue);

	if (td->td_linux_task)
		linux_task_drop_callback(td);
	if (masterexit && p->p_linux_mm)
		linux_proc_drop_callback(p);

	/*
	 * Clean up any syscall-cached ucred or rlimit.
	 */
	if (td->td_ucred) {
		crfree(td->td_ucred);
		td->td_ucred = NULL;
	}
	if (td->td_limit) {
		struct plimit *rlimit;

		rlimit = td->td_limit;
		td->td_limit = NULL;
		plimit_free(rlimit);
	}

	/*
	 * Cleanup any cached descriptors for this thread
	 */
	if (p->p_fd)
		fexitcache(td);

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);

	/*
	 * If we don't hold the process until the LWP is reaped wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */
	PHOLD(p);

	/*
	 * Do any remaining work that might block on us.  We should be
	 * coded such that further blocking is ok after decrementing
	 * p_nthreads but don't take the chance.
	 */
	dsched_exit_thread(td);
	biosched_done(curthread);

	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 *
	 * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
	 *
	 * The process is left held until the reaper calls lwp_dispose() on
	 * the lp (after calling lwp_wait()).
	 */
	if (masterexit == 0) {
		int cpu = mycpuid;

		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		--p->p_nthreads;
		if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
			dowake = 1;
		lwkt_gettoken(&deadlwp_token[cpu]);
		LIST_INSERT_HEAD(&deadlwp_list[cpu], lp, u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[cpu], deadlwp_task[cpu]);
		lwkt_reltoken(&deadlwp_token[cpu]);
	} else {
		--p->p_nthreads;
		if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
			dowake = 1;
	}

	/*
	 * We no longer need p_token.
	 *
	 * Tell the userland scheduler that we are going away
	 */
	lwkt_reltoken(&p->p_token);
	p->p_usched->heuristic_exiting(lp, p);
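
	/*
	 * NOTE: non-master LWPs were queued on this cpu's deadlwp list
	 * above; the per-cpu taskqueue runs reaplwps() to lwp_wait() and
	 * lwp_dispose() them asynchronously, while the master LWP stays
	 * on p_lwps and is reaped synchronously from kern_wait().
	 */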

	/*
	 * Issue late wakeups after releasing our token to give us a chance
	 * to deschedule and switch away before another cpu in a wait*()
	 * reaps us.  This is done as late as possible to reduce contention.
	 */
	if (dowake)
		wakeup(&p->p_nthreads);
	if (waddr)
		wakeup(waddr);

	cpu_lwp_exit();
}

/*
 * Wait until a lwp is completely dead.  The final interlock in this drama
 * is when TDF_EXITING is set in cpu_thread_exit() just before the final
 * switchout.
 *
 * At the point TDF_EXITING is set a complete exit is accomplished when
 * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear.  td_mpflags has two
 * post-switch interlock flags that can be used to wait for the TDF_
 * flags to clear.
 *
 * Returns non-zero on success, and zero if the caller needs to retry
 * the lwp_wait().
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	u_int mpflags;

	KKASSERT(lwkt_preempted_proc() != lp);

	/*
	 * This bit of code uses the thread destruction interlock
	 * managed by lwkt_switch_return() to wait for the lwp's
	 * thread to completely disengage.
	 *
	 * It is possible for us to race another cpu core so we
	 * have to do this correctly.
	 */
	for (;;) {
		mpflags = td->td_mpflags;
		cpu_ccfence();
		if (mpflags & TDF_MP_EXITSIG)
			break;
		tsleep_interlock(td, 0);
		if (atomic_cmpset_int(&td->td_mpflags, mpflags,
				      mpflags | TDF_MP_EXITWAIT)) {
			tsleep(td, PINTERLOCKED, "lwpxt", 0);
		}
	}

	/*
	 * We've already waited for the core exit but there can still
	 * be other refs from e.g. process scans and such.
	 */
	if (lp->lwp_lock > 0) {
		tsleep(lp, 0, "lwpwait1", 1);
		return (0);
	}
	if (td->td_refs) {
		tsleep(td, 0, "lwpwait2", 1);
		return (0);
	}

	/*
	 * Now that we have the thread destruction interlock these flags
	 * really should already be cleaned up; keep a check for safety.
	 *
	 * We can't rip its stack out from under it until TDF_EXITING is
	 * set and both TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets preempted.
	 */
	while ((td->td_flags & (TDF_RUNNING |
				TDF_RUNQ |
				TDF_PREEMPT_LOCK |
				TDF_EXITING)) != TDF_EXITING) {
		tsleep(lp, 0, "lwpwait3", 1);
		return (0);
	}

	KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0,
		("lwp_wait: td %p (%s) still on run or sleep queue",
		 td, td->td_comm));
	return (1);
}
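
/*
 * The for (;;) loop in lwp_wait() is the standard tsleep_interlock()
 * pattern: register on the sleep queue first, then atomically publish
 * TDF_MP_EXITWAIT only if td_mpflags did not change underneath us.  If
 * the cmpset fails, the exiting thread may have posted TDF_MP_EXITSIG
 * concurrently, so we loop and re-test instead of sleeping on a stale
 * snapshot.
 */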

/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(lp->lwp_lock == 0);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING |
				  TDF_RUNQ |
				  TDF_PREEMPT_LOCK |
				  TDF_EXITING)) == TDF_EXITING);

	PRELE(lp->lwp_proc);
	lp->lwp_proc = NULL;
	if (td != NULL) {
		td->td_proc = NULL;
		td->td_lwp = NULL;
		lp->lwp_thread = NULL;
		lwkt_free_thread(td);
	}
	kfree(lp, M_LWP);
}

int
sys_wait4(struct sysmsg *sysmsg, const struct wait_args *uap)
{
	struct __wrusage wrusage;
	int error;
	int status;
	int options;
	id_t id;
	idtype_t idtype;

	options = uap->options | WEXITED | WTRAPPED;
	id = uap->pid;

	if (id == WAIT_ANY) {
		idtype = P_ALL;
	} else if (id == WAIT_MYPGRP) {
		idtype = P_PGID;
		id = curproc->p_pgid;
	} else if (id < 0) {
		idtype = P_PGID;
		id = -id;
	} else {
		idtype = P_PID;
	}

	error = kern_wait(idtype, id, &status, options, &wrusage,
			  NULL, &sysmsg->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage) {
		ruadd(&wrusage.wru_self, &wrusage.wru_children);
		error = copyout(&wrusage.wru_self, uap->rusage,
				sizeof(*uap->rusage));
	}
	return (error);
}

int
sys_wait6(struct sysmsg *sysmsg, const struct wait6_args *uap)
{
	struct __wrusage wrusage;
	siginfo_t info;
	siginfo_t *infop;
	int error;
	int status;
	int options;
	id_t id;
	idtype_t idtype;

	/*
	 * NOTE: wait6() requires WEXITED and WTRAPPED to be specified if
	 *	 desired.
	 */
	options = uap->options;
	idtype = uap->idtype;
	id = uap->id;
	infop = uap->info ? &info : NULL;

	switch (idtype) {
	case P_PID:
	case P_PGID:
		if (id == WAIT_MYPGRP) {
			idtype = P_PGID;
			id = curproc->p_pgid;
		}
		break;
	default:
		/* let kern_wait deal with the remainder */
		break;
	}

	error = kern_wait(idtype, id, &status, options,
			  &wrusage, infop, &sysmsg->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->wrusage)
		error = copyout(&wrusage, uap->wrusage, sizeof(*uap->wrusage));
	if (error == 0 && uap->info)
		error = copyout(&info, uap->info, sizeof(*uap->info));
	return (error);
}
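
/*
 * Example (sketch, userland view of the two entry points above):
 *
 *	int status;
 *	pid_t pid = wait4(-1, &status, WNOHANG, NULL);
 *
 * is implicitly WEXITED | WTRAPPED, while wait6() must spell its
 * events out, e.g.
 *
 *	siginfo_t si;
 *	struct __wrusage wru;
 *	pid = wait6(P_PID, child, &status, WEXITED | WSTOPPED, &wru, &si);
 *
 * Variable names here are illustrative only.
 */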

/*
 * kernel wait*() system call support
 */
int
kern_wait(idtype_t idtype, id_t id, int *status, int options,
	  struct __wrusage *wrusage, siginfo_t *info, int *res)
{
	struct thread *td = curthread;
	struct lwp *lp;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	struct ucred *cr;
	struct pargs *pa;
	struct sigacts *ps;
	int nfound, error;
	long waitgen;

	/*
	 * Must not have extraneous options.  Must have at least one
	 * matchable option.
	 */
	if (options & ~(WUNTRACED | WNOHANG | WCONTINUED | WLINUXCLONE |
			WSTOPPED | WEXITED | WTRAPPED | WNOWAIT)) {
		return (EINVAL);
	}
	if ((options & (WEXITED | WUNTRACED | WCONTINUED | WTRAPPED)) == 0) {
		return (EINVAL);
	}

	/*
	 * Protect the q->p_children list
	 */
	lwkt_gettoken(&q->p_token);
loop:
	/*
	 * All sorts of things can change due to blocking so we have to loop
	 * all the way back up here.
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This
	 * little two-line hack restores this effect.
	 *
	 * No locks are held so we can safely block the process here.
	 */
	if (STOPLWP(q, td->td_lwp))
		tstop();

	nfound = 0;

	/*
	 * Loop on children.
	 *
	 * NOTE: We don't want to break q's p_token in the loop for the
	 *	 case where no children are found or we risk breaking the
	 *	 interlock between child and parent.
	 */
	waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		/*
		 * Skip children that another thread is already
		 * uninterruptibly reaping.
		 */
		if (PWAITRES_PENDING(p))
			continue;

		/*
		 * Filter, (p) will be held on fall-through.  Try to optimize
		 * this to avoid the atomic op until we are pretty sure we
		 * want this process.
		 */
		switch (idtype) {
		case P_ALL:
			PHOLD(p);
			break;
		case P_PID:
			if (p->p_pid != (pid_t)id)
				continue;
			PHOLD(p);
			break;
		case P_PGID:
			if (p->p_pgid != (pid_t)id)
				continue;
			PHOLD(p);
			break;
		case P_SID:
			PHOLD(p);
			if (p->p_session && p->p_session->s_sid != (pid_t)id) {
				PRELE(p);
				continue;
			}
			break;
		case P_UID:
			PHOLD(p);
			if (p->p_ucred->cr_uid != (uid_t)id) {
				PRELE(p);
				continue;
			}
			break;
		case P_GID:
			PHOLD(p);
			if (p->p_ucred->cr_gid != (gid_t)id) {
				PRELE(p);
				continue;
			}
			break;
		case P_JAILID:
			PHOLD(p);
			if (p->p_ucred->cr_prison &&
			    p->p_ucred->cr_prison->pr_id != (int)id) {
				PRELE(p);
				continue;
			}
			break;
		default:
			/* unsupported filter */
			continue;
		}
		/* (p) is held at this point */
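
		/*
		 * NOTE: the XOR test below skips (p) whenever the two
		 * conditions disagree: a WLINUXCLONE wait only matches
		 * clone-style threads (p_sigparent != SIGCHLD) and a
		 * normal wait only matches ordinary children.
		 */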

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			PRELE(p);
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB && (options & WEXITED)) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 *
			 * Only this routine can remove a process from
			 * the zombie list and destroy it.
			 *
			 * This function will fail after sleeping if another
			 * thread owns the zombie lock.  This function will
			 * fail immediately or after sleeping if another
			 * thread owns or obtains ownership of the reap via
			 * WAITRES.
			 */
			if (PHOLDZOMB(p)) {
				PRELE(p);
				goto loop;
			}
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				PRELEZOMB(p);
				goto loop;
			}

			/*
			 * We are the reaper, from this point on the reap
			 * cannot be aborted.
			 */
			PWAITRES_SET(p);
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				/*
				 * Make sure no one is using this lwp before
				 * it is removed from the tree.  If we didn't
				 * wait here, lwp-tree iteration with a
				 * blocking operation would be broken.
				 */
				while (lp->lwp_lock > 0)
					tsleep(lp, 0, "zomblwp", 1);
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);

			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			PRELE(p);		/* from top of loop */
			PSTALL(p, "reap3", 1);	/* 1 ref (for PZOMBHOLD) */
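
			/*
			 * NOTE: PSTALL() sleeps until p->p_lock drains to
			 * the requested count; after it returns the only
			 * remaining reference should be our own zombie
			 * hold, so late /proc scans and LWP reapers are
			 * out of the structure before we tear it apart
			 * below.
			 */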

			/* Take care of our return values. */
			*res = p->p_pid;

			*status = p->p_xstat;
			wrusage->wru_self = p->p_ru;
			wrusage->wru_children = p->p_cru;

			if (info) {
				bzero(info, sizeof(*info));
				info->si_errno = 0;
				info->si_signo = SIGCHLD;
				if (WIFEXITED(p->p_xstat)) {
					info->si_code = CLD_EXITED;
					info->si_status =
						WEXITSTATUS(p->p_xstat);
				} else {
					info->si_code = CLD_KILLED;
					info->si_status = WTERMSIG(p->p_xstat);
				}
				info->si_pid = p->p_pid;
				info->si_uid = p->p_ucred->cr_uid;
			}

			/*
			 * WNOWAIT shortcuts to done here, leaving the
			 * child on the zombie list.
			 */
			if (options & WNOWAIT) {
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				error = 0;
				goto done;
			}

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				PRELE(t);
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				error = 0;
				goto done;
			}

			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			proc_remove_zombie(p);
			proc_userunmap(p);
			lwkt_reltoken(&p->p_token);
			leavepgrp(p);

			p->p_xstat = 0;
			ruadd(&q->p_cru, &p->p_ru);
			ruadd(&q->p_cru, &p->p_cru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.  p_spin is required to
			 * avoid races against allproc scans.
			 */
			spin_lock(&p->p_spin);
			cr = p->p_ucred;
			p->p_ucred = NULL;
			spin_unlock(&p->p_spin);
			crfree(cr);

			/*
			 * Remove unused arguments
			 */
			pa = p->p_args;
			p->p_args = NULL;
			if (pa && refcount_release(&pa->ar_ref)) {
				kfree(pa, M_PARGS);
				pa = NULL;
			}

			ps = p->p_sigacts;
			p->p_sigacts = NULL;
			if (ps && refcount_release(&ps->ps_refcnt)) {
				kfree(ps, M_SUBPROC);
				ps = NULL;
			}

			/*
			 * Our exitingcount was incremented when the process
			 * became a zombie, now that the process has been
			 * removed from (almost) all lists we should be able
			 * to safely destroy its vmspace.  Wait for any
			 * current holders to go away (so the vmspace remains
			 * stable), then scrap it.
			 *
			 * NOTE: Releasing the parent process (q) p_token
			 *	 across the vmspace_exitfree() call is
			 *	 important here to reduce stalls on
			 *	 interactions with (q) (such as
			 *	 fork/exec/wait or 'ps').
			 */
			PSTALL(p, "reap4", 1);
			lwkt_reltoken(&q->p_token);
			vmspace_exitfree(p);
			lwkt_gettoken(&q->p_token);
			PSTALL(p, "reap5", 1);

			/*
			 * NOTE: We have to officially release ZOMB in order
			 *	 to ensure that a racing thread in kern_wait()
			 *	 which blocked on ZOMB is woken up.
			 */
			PRELEZOMB(p);
			kfree(p->p_uidpcpu, M_SUBPROC);
			kfree(p, M_PROC);
			atomic_add_int(&nprocs, -1);
			error = 0;
			goto done;
		}

		/*
		 * Process has not yet exited
		 */
		if ((p->p_stat == SSTOP || p->p_stat == SCORE) &&
		    (p->p_flags & P_WAITED) == 0 &&
		    (((p->p_flags & P_TRACED) && (options & WTRAPPED)) ||
		     (options & WSTOPPED))) {
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if ((p->p_stat != SSTOP && p->p_stat != SCORE) ||
			    (p->p_flags & P_WAITED) != 0 ||
			    ((p->p_flags & P_TRACED) == 0 &&
			     (options & WUNTRACED) == 0)) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			/*
			 * Don't set P_WAITED if WNOWAIT specified, leaving
			 * the process in a waitable state.
			 */
			if ((options & WNOWAIT) == 0)
				p->p_flags |= P_WAITED;

			*res = p->p_pid;
			*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			bzero(wrusage, sizeof(*wrusage));
			error = 0;
			if (info) {
				bzero(info, sizeof(*info));
				if (p->p_flags & P_TRACED)
					info->si_code = CLD_TRAPPED;
				else
					info->si_code = CLD_STOPPED;
				info->si_status = WSTOPSIG(p->p_xstat);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
		if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) {
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if ((p->p_flags & P_CONTINUED) == 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			*res = p->p_pid;

			/*
			 * Don't clear P_CONTINUED if WNOWAIT specified,
			 * leaving the process in a waitable state.
			 */
			if ((options & WNOWAIT) == 0)
				p->p_flags &= ~P_CONTINUED;

			*status = SIGCONT;
			error = 0;
			if (info) {
				bzero(info, sizeof(*info));
				info->si_code = CLD_CONTINUED;
				info->si_status = WSTOPSIG(p->p_xstat);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
		PRELE(p);
	}
	if (nfound == 0) {
		error = ECHILD;
		goto done;
	}
	if (options & WNOHANG) {
		*res = 0;
		error = 0;
		goto done;
	}
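
	/*
	 * NOTE on the p_waitgen encoding used below: the low 31 bits form
	 * a generation count which exit1() bumps (atomic_add_long(..., 1))
	 * when a child changes state, while waiters add 0x80000000 so
	 * that registering interest only moves the high bits.  Comparing
	 * (waitgen & 0x7FFFFFFF) with the parent's current p_waitgen thus
	 * detects state changes that occurred while we were scanning,
	 * closing the race between the scan above and the tsleep() below.
	 */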

	/*
	 * Wait for signal - interlocked using q->p_waitgen.
	 */
	error = 0;
	while ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
		tsleep_interlock(q, PCATCH);
		waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
		if ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
			error = tsleep(q, PCATCH | PINTERLOCKED, "wait", 0);
			break;
		}
	}
	if (error) {
done:
		lwkt_reltoken(&q->p_token);
		return (error);
	}
	goto loop;
}

/*
 * Change child's parent process to parent.
 *
 * p_children/p_sibling require the parent's token, and
 * changing pptr requires the child's token, so we have to
 * get three tokens to do this operation.  We also need to
 * hold pointers that might get ripped out from under us to
 * preserve structural integrity.
 *
 * It is possible to race another reparent or disconnect or other
 * similar operation.  We must retry when this situation occurs.
 * Once we successfully reparent the process we no longer care
 * about any races.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	struct proc *opp;

	PHOLD(parent);
	while ((opp = child->p_pptr) != parent) {
		PHOLD(opp);
		lwkt_gettoken(&opp->p_token);
		lwkt_gettoken(&child->p_token);
		lwkt_gettoken(&parent->p_token);
		if (child->p_pptr != opp) {
			lwkt_reltoken(&parent->p_token);
			lwkt_reltoken(&child->p_token);
			lwkt_reltoken(&opp->p_token);
			PRELE(opp);
			continue;
		}
		LIST_REMOVE(child, p_sibling);
		LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
		child->p_pptr = parent;
		child->p_ppid = parent->p_pid;
		lwkt_reltoken(&parent->p_token);
		lwkt_reltoken(&child->p_token);
		lwkt_reltoken(&opp->p_token);
		if (LIST_EMPTY(&opp->p_children))
			wakeup(opp);
		PRELE(opp);
		break;
	}
	PRELE(parent);
}

/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 *	Take the arguments given and put them onto the exit callout list.
 *	However, first make sure that it's not already there.
 *	Returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",
			function);
#endif
	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}
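
/*
 * Example (sketch): a loadable module that needs per-process work at
 * exit time registers a callback once and removes it on unload; the
 * callback name and signature here are illustrative only:
 *
 *	static void mymod_exit_cb(struct thread *td);
 *
 *	at_exit(mymod_exit_cb);		-- on MOD_LOAD
 *	rm_at_exit(mymod_exit_cb);	-- on MOD_UNLOAD
 *
 * The callback is invoked from exit1() with the exiting thread as its
 * argument.
 */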

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
			return (1);
		}
	}
	return (0);
}

/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
	struct lwplist *lwplist = context;
	struct lwp *lp;
	int cpu = mycpuid;

	lwkt_gettoken(&deadlwp_token[cpu]);
	while ((lp = LIST_FIRST(lwplist)) != NULL) {
		LIST_REMOVE(lp, u.lwp_reap_entry);
		reaplwp(lp);
	}
	lwkt_reltoken(&deadlwp_token[cpu]);
}

static void
reaplwp(struct lwp *lp)
{
	while (lwp_wait(lp) == 0)
		;
	lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		lwkt_token_init(&deadlwp_token[cpu], "deadlwpl");
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
					    M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
	}
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);