/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 */

#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/jail.h>
#include <sys/kern_syscall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/user.h>

#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

#include <machine/vmm.h>

static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
static struct task *deadlwp_task[MAXCPU];
static struct lwplist deadlwp_list[MAXCPU];
static struct lwkt_token deadlwp_token[MAXCPU];

/*
 * exit --
 *	Death of process.
 *
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
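
/*
 * For reference: W_EXITCODE() packs a return value and a termination
 * signal into the single int status word saved in p_xstat, essentially
 * ((ret << 8) | sig), so a plain exit(2) encodes its return value with
 * signal 0.  See <sys/wait.h> for the authoritative definition.
 */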

/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 */
int
sys_extexit(struct extexit_args *uap)
{
	struct proc *p = curproc;
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;
	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	lwkt_gettoken(&p->p_token);

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Only perform a simple lwp exit if at least one other lwp
		 * remains in the proc to call exit1() later; otherwise the
		 * proc would be left UNDEAD and never even reach SZOMB!
		 */
		if (p->p_nthreads > 1) {
			lwp_exit(0, NULL);	/* called w/ p_token held */
			/* NOT REACHED */
		}
		/* else last lwp in proc:  do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		lwkt_reltoken(&p->p_token);
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
	lwkt_reltoken(&p->p_token);	/* safety */
}
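
/*
 * Illustrative userland call matching the decoding above (the OR
 * composition of an EXTEXIT_* action with an EXTEXIT_* 'who' value is
 * an assumption; see <sys/unistd.h> for the actual field layout):
 *
 *	static int lwp_status;		(hypothetical status word)
 *	...
 *	extexit(EXTEXIT_SETINT | EXTEXIT_LWP, status, &lwp_status);
 *
 * This would publish 'status' to lwp_status and then terminate only
 * the calling lwp, falling through to a full exit1() if it is the
 * last lwp in the process.
 */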

/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 *
 * Caller must hold curproc->p_token
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flags & P_WEXIT)
		return (EALREADY);
	p->p_flags |= P_WEXIT;

	/*
	 * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
	 */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
		p->p_flags &= ~P_WEXIT;
	}
	return (0);
}
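
/*
 * (For reference: exit1() below calls killalllwps(0); the exec path is
 * believed to call killalllwps(1) so the surviving thread can continue
 * running the new image; see kern_exec.c.)
 */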

/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_MP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		lwkt_gettoken(&tlp->lwp_token);
		if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
			lwpsignal(p, tlp, SIGKILL);
			atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT);
		}
		lwkt_reltoken(&tlp->lwp_token);
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.
	 */
	while (p->p_nthreads > 1)
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q;
	struct proc *pp;
	struct proc *reproc;
	struct sysreaper *reap;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	lwkt_gettoken(&p->p_token);

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}
	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);

	/*
	 * Kill all lwps associated with the current process; return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0, NULL);
		/* NOT REACHED */
	}

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;

		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while (q) {
			killArgs.pid = q->p_pid;
			/*
			 * The kill() interface is cleaner than
			 * signalling the peer internally.
			 */
			sys_kill(&killArgs);
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	p->p_flags |= P_POSTEXIT;	/* stop procfs stepping */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	p->p_xstat = rv;
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * XXX: imho, the eventhandler stuff is much cleaner than this.
	 *	Maybe we should move everything to use eventhandler.
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(td);

	if (p->p_flags & P_PROFIL)
		stopprofclock(p);

	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop_sync(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(p, NULL);

	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}

	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	KKASSERT(p->p_numposixlocks == 0);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	if (p->p_vkernel) {
		vkernel_lwp_exit(lp);
		vkernel_exit(p);
	}

	/*
	 * Release the user portion of address space.  The exitbump prevents
	 * the vmspace from being completely eradicated (using holdcnt).
	 * This releases references to vnodes, which could cause I/O if the
	 * file has been unlinked.  We need to do this early enough that
	 * we can still sleep.
	 *
	 * We can't free the entire vmspace as the kernel stack may be mapped
	 * within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	vmspace_relexit(vm);

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
			 *
			 * NOTE: while waiting for the process group to exit
			 * it is possible that one of the processes in the
			 * group will revoke the tty, so the ttyclosesession()
			 * function will re-check sp->s_ttyvp.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				ttywait(sp->s_ttyp);
				ttyclosesession(sp, 1); /* also revoke */
			}
			/*
			 * Release the tty.  If someone has it open via
			 * /dev/tty then close it (since they no longer can
			 * once we've NULL'd it out).
			 */
			ttyclosesession(sp, 0);

			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	if (p->p_tracenode)
		ktrdestroy(&p->p_tracenode);
	p->p_traceflag = 0;
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/* Release namecache handle to text file */
	if (p->p_textnch.ncp)
		cache_drop(&p->p_textnch);

	/*
	 * We have to handle PPWAIT here or proc_move_allproc_zombie()
	 * will block on the PHOLD() the parent is doing.
	 *
	 * We are using the flag as an interlock so an atomic op is
	 * necessary to synchronize with the parent's cpu.
	 */
	if (p->p_flags & P_PPWAIT) {
		if (p->p_pptr && p->p_pptr->p_upmap)
			p->p_pptr->p_upmap->invfork = 0;
		atomic_clear_int(&p->p_flags, P_PPWAIT);
		wakeup(p->p_pptr);
	}

	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 *
	 * Interlock against waiters using p_waitgen.  We increment
	 * p_waitgen after completing the move of our process to the
	 * zombie list.
	 *
	 * WARNING: pp becomes stale when we block, clear it now as a
	 *	    reminder.
	 */
	proc_move_allproc_zombie(p);
	pp = p->p_pptr;
	atomic_add_long(&pp->p_waitgen, 1);
	pp = NULL;

	/*
	 * release controlled reaper for exit if we own it and return the
	 * remaining reaper (the one for us), which we will drop after we
	 * are done.
	 */
	reap = reaper_exit(p);

	/*
	 * Reparent all of this process's children to the init process or
	 * to the designated reaper.  We must hold the reaper's p_token in
	 * order to safely mess with p_children.
	 *
	 * We already hold p->p_token (to remove the children from our list).
	 */
	reproc = NULL;
	q = LIST_FIRST(&p->p_children);
	if (q) {
		reproc = reaper_get(reap);
		lwkt_gettoken(&reproc->p_token);
		while ((q = LIST_FIRST(&p->p_children)) != NULL) {
			PHOLD(q);
			lwkt_gettoken(&q->p_token);
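			/*
			 * Getting the token may have blocked; if the
			 * list changed underneath us, retry with the
			 * new head.
			 */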
			if (q != LIST_FIRST(&p->p_children)) {
				lwkt_reltoken(&q->p_token);
				PRELE(q);
				continue;
			}
			LIST_REMOVE(q, p_sibling);
			LIST_INSERT_HEAD(&reproc->p_children, q, p_sibling);
			q->p_pptr = reproc;
			q->p_sigparent = SIGCHLD;

			/*
			 * Traced processes are killed
			 * since their existence means someone is screwing up.
			 */
			if (q->p_flags & P_TRACED) {
				q->p_flags &= ~P_TRACED;
				ksignal(q, SIGKILL);
			}
			lwkt_reltoken(&q->p_token);
			PRELE(q);
		}
		lwkt_reltoken(&reproc->p_token);
		wakeup(reproc);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	calcru_proc(p, &p->p_ru);
	ruadd(&p->p_ru, &p->p_cru);

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify the reaper
	 * instead (it will handle this situation).
	 *
	 * NOTE: The reaper can still be the parent process.
	 *
	 * (must reload pp)
	 */
	if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		if (reproc == NULL)
			reproc = reaper_get(reap);
		proc_reparent(p, reproc);
	}
	if (reproc)
		PRELE(reproc);
	if (reap)
		reaper_drop(reap);

	/*
	 * Signal (possibly new) parent.
	 */
	pp = p->p_pptr;
	PHOLD(pp);
	if (p->p_sigparent && pp != initproc) {
		int sig = p->p_sigparent;

		if (sig != SIGUSR1 && sig != SIGCHLD)
			sig = SIGCHLD;
		ksignal(pp, sig);
	} else {
		ksignal(pp, SIGCHLD);
	}
	p->p_flags &= ~P_TRACED;
	PRELE(pp);

	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	plimit_free(p);

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 *
	 * pp is a wild pointer now but still the correct wakeup() target.
	 * lwp_exit() only uses it to send the wakeup() signal to the likely
	 * parent.  Any reparenting race that occurs will get a signal
	 * automatically and not be an issue.
	 */
	lwp_exit(1, pp);
}

/*
 * Eventually called by every exiting LWP
 *
 * p->p_token must be held.  mplock may be held and will be released.
 */
void
lwp_exit(int masterexit, void *waddr)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = lp->lwp_proc;
	int dowake = 0;

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can schedule someone else in.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * lwp_exit() may be called without setting LWP_MP_WEXIT, so
	 * make sure it is set here.
	 */
	ASSERT_LWKT_TOKEN_HELD(&p->p_token);
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);

	/*
	 * Clean up any virtualization
	 */
	if (lp->lwp_vkernel)
		vkernel_lwp_exit(lp);

	if (td->td_vmm)
		vmm_vmdestroy();

	/*
	 * Clean up select/poll support
	 */
	kqueue_terminate(&lp->lwp_kqueue);

	/*
	 * Clean up any syscall-cached ucred
	 */
	if (td->td_ucred) {
		crfree(td->td_ucred);
		td->td_ucred = NULL;
	}

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);

	/*
	 * If we don't hold the process until the LWP is reaped, wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */
	PHOLD(p);

	/*
	 * Do any remaining work that might block on us.  The code should
	 * be structured such that further blocking is ok after decrementing
	 * p_nthreads, but don't take the chance.
	 */
	dsched_exit_thread(td);
	biosched_done(curthread);

	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 *
	 * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
	 *
	 * The process is left held until the reaper calls lwp_dispose() on
	 * the lp (after calling lwp_wait()).
	 */
	if (masterexit == 0) {
		int cpu = mycpuid;

		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		--p->p_nthreads;
		if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
			dowake = 1;
		lwkt_gettoken(&deadlwp_token[cpu]);
		LIST_INSERT_HEAD(&deadlwp_list[cpu], lp, u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[cpu], deadlwp_task[cpu]);
		lwkt_reltoken(&deadlwp_token[cpu]);
	} else {
		--p->p_nthreads;
		if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
			dowake = 1;
	}

	/*
	 * We no longer need p_token.
	 *
	 * Tell the userland scheduler that we are going away
	 */
	lwkt_reltoken(&p->p_token);
	p->p_usched->heuristic_exiting(lp, p);

	/*
	 * Issue late wakeups after releasing our token to give us a chance
	 * to deschedule and switch away before another cpu in a wait*()
	 * reaps us.  This is done as late as possible to reduce contention.
	 */
	if (dowake)
		wakeup(&p->p_nthreads);
	if (waddr)
		wakeup(waddr);

	cpu_lwp_exit();
}

/*
 * Wait until a lwp is completely dead.  The final interlock in this drama
 * is when TDF_EXITING is set in cpu_thread_exit() just before the final
 * switchout.
 *
 * At the point TDF_EXITING is set a complete exit is accomplished when
 * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear.  td_mpflags has two
 * post-switch interlock flags that can be used to wait for the TDF_
 * flags to clear.
 *
 * Returns non-zero on success, and zero if the caller needs to retry
 * the lwp_wait().
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;
	u_int mpflags;

	KKASSERT(lwkt_preempted_proc() != lp);

	/*
	 * This bit of code uses the thread destruction interlock
	 * managed by lwkt_switch_return() to wait for the lwp's
	 * thread to completely disengage.
	 *
	 * It is possible for us to race another cpu core so we
	 * have to do this correctly.
	 */
	for (;;) {
		mpflags = td->td_mpflags;
		cpu_ccfence();
		if (mpflags & TDF_MP_EXITSIG)
			break;
		tsleep_interlock(td, 0);
		if (atomic_cmpset_int(&td->td_mpflags, mpflags,
				      mpflags | TDF_MP_EXITWAIT)) {
			tsleep(td, PINTERLOCKED, "lwpxt", 0);
		}
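		/* else the cmpset raced a flag change; just re-test */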
	}

	/*
	 * We've already waited for the core exit but there can still
	 * be other refs from e.g. process scans and such.
	 */
	if (lp->lwp_lock > 0) {
		tsleep(lp, 0, "lwpwait1", 1);
		return (0);
	}
	if (td->td_refs) {
		tsleep(td, 0, "lwpwait2", 1);
		return (0);
	}

	/*
	 * Now that we have the thread destruction interlock these flags
	 * really should already be cleaned up, but keep a check for safety.
	 *
	 * We can't rip its stack out from under it until TDF_EXITING is
	 * set and both TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets preempted.
	 */
	while ((td->td_flags & (TDF_RUNNING |
				TDF_RUNQ |
			        TDF_PREEMPT_LOCK |
			        TDF_EXITING)) != TDF_EXITING) {
		tsleep(lp, 0, "lwpwait3", 1);
		return (0);
	}

	KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0,
		("lwp_wait: td %p (%s) still on run or sleep queue",
		td, td->td_comm));
	return (1);
}

/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING |
				  TDF_RUNQ |
				  TDF_PREEMPT_LOCK |
				  TDF_EXITING)) == TDF_EXITING);

	PRELE(lp->lwp_proc);
	lp->lwp_proc = NULL;
	if (td != NULL) {
		td->td_proc = NULL;
		td->td_lwp = NULL;
		lp->lwp_thread = NULL;
		lwkt_free_thread(td);
	}
	kfree(lp, M_LWP);
}

int
sys_wait4(struct wait_args *uap)
{
	struct rusage rusage;
	int error, status;

	error = kern_wait(uap->pid, (uap->status ? &status : NULL),
			  uap->options, (uap->rusage ? &rusage : NULL),
			  &uap->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage)
		error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
	return (error);
}
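
/*
 * Illustrative userland usage of the syscall above (standard BSD
 * wait4(2) semantics, nothing DragonFly-specific assumed):
 *
 *	int status;
 *	struct rusage ru;
 *	pid_t pid = wait4(-1, &status, WNOHANG, &ru);
 *	if (pid > 0 && WIFEXITED(status))
 *		printf("%d exited with %d\n", pid, WEXITSTATUS(status));
 */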

/*
 * kern_wait()
 *
 * wait_args(int pid, int *status, int options, struct rusage *rusage)
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
{
	struct thread *td = curthread;
	struct lwp *lp;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	struct pargs *pa;
	struct sigacts *ps;
	int nfound, error;
	long waitgen;

	if (pid == 0)
		pid = -q->p_pgid;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);

	/*
	 * Protect the q->p_children list
	 */
	lwkt_gettoken(&q->p_token);
loop:
	/*
	 * All sorts of things can change due to blocking so we have to loop
	 * all the way back up here.
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This little
	 * two-line hack restores this effect.
	 */
	while (q->p_stat == SSTOP)
		tstop();

	nfound = 0;

	/*
	 * Loop on children.
	 *
	 * NOTE: We don't want to break q's p_token in the loop for the
	 *	 case where no children are found or we risk breaking the
	 *	 interlock between child and parent.
	 */
	waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
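	/*
	 * (Exiting children bump the low bits of p_waitgen; our own
	 * fetchadd above only disturbs the high bits, so a change in the
	 * low 31 bits later means a child transitioned while we scanned
	 * and we must rescan rather than sleep.)
	 */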
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid) {
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			continue;
		}

		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 *
			 * Only this routine can remove a process from
			 * the zombie list and destroy it; use PHOLDZOMB()
			 * to serialize us and loop if it blocks (interlocked
			 * by the parent's q->p_token).
			 *
			 * WARNING!  (p) can be invalid when PHOLDZOMB(p)
			 *	     returns non-zero.  Be sure not to
			 *	     mess with it.
			 */
			if (PHOLDZOMB(p))
				goto loop;
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				goto loop;
			}
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);

			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			PRELE(p);
			PSTALL(p, "reap3", 0);

			/* Take care of our return values. */
			*res = p->p_pid;

			if (status)
				*status = p->p_xstat;
			if (rusage)
				*rusage = p->p_ru;

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				PHOLD(p);
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				error = 0;
				PRELE(t);
				lwkt_reltoken(&p->p_token);
				PRELEZOMB(p);
				goto done;
			}

			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			proc_remove_zombie(p);
			proc_userunmap(p);
			lwkt_reltoken(&p->p_token);
			leavepgrp(p);

			p->p_xstat = 0;
			ruadd(&q->p_cru, &p->p_ru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			pa = p->p_args;
			p->p_args = NULL;
			if (pa && refcount_release(&pa->ar_ref)) {
				kfree(pa, M_PARGS);
				pa = NULL;
			}

			ps = p->p_sigacts;
			p->p_sigacts = NULL;
			if (ps && refcount_release(&ps->ps_refcnt)) {
				kfree(ps, M_SUBPROC);
				ps = NULL;
			}

			/*
			 * Our exitingcount was incremented when the process
			 * became a zombie, now that the process has been
			 * removed from (almost) all lists we should be able
			 * to safely destroy its vmspace.  Wait for any current
			 * holders to go away (so the vmspace remains stable),
			 * then scrap it.
			 */
			PSTALL(p, "reap4", 0);
			vmspace_exitfree(p);
			PSTALL(p, "reap5", 0);

			/*
			 * NOTE: We have to officially release ZOMB in order
			 *	 to ensure that a racing thread in kern_wait()
			 *	 which blocked on ZOMB is woken up.
			 */
			PHOLD(p);
			PRELEZOMB(p);
			kfree(p, M_PROC);
			atomic_add_int(&nprocs, -1);
			error = 0;
			goto done;
		}
		if (p->p_stat == SSTOP && (p->p_flags & P_WAITED) == 0 &&
		    ((p->p_flags & P_TRACED) || (options & WUNTRACED))) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if (p->p_stat != SSTOP ||
			    (p->p_flags & P_WAITED) != 0 ||
			    ((p->p_flags & P_TRACED) == 0 &&
			     (options & WUNTRACED) == 0)) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			p->p_flags |= P_WAITED;

			*res = p->p_pid;
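			/*
			 * W_STOPCODE() encodes 'stopped by signal p_xstat'
			 * so WIFSTOPPED()/WSTOPSIG() work on the result.
			 */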
			if (status)
				*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			if (rusage)
				bzero(rusage, sizeof(*rusage));
			error = 0;
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
		if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}
			if ((p->p_flags & P_CONTINUED) == 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				goto loop;
			}

			*res = p->p_pid;
			p->p_flags &= ~P_CONTINUED;

			if (status)
				*status = SIGCONT;
			error = 0;
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			goto done;
		}
	}
	if (nfound == 0) {
		error = ECHILD;
		goto done;
	}
	if (options & WNOHANG) {
		*res = 0;
		error = 0;
		goto done;
	}

	/*
	 * Wait for signal - interlocked using q->p_waitgen.
	 */
	error = 0;
	while ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
		tsleep_interlock(q, PCATCH);
		waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
		if ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
			error = tsleep(q, PCATCH | PINTERLOCKED, "wait", 0);
			break;
		}
	}
	if (error) {
done:
		lwkt_reltoken(&q->p_token);
		return (error);
	}
	goto loop;
}

/*
 * Change child's parent process to parent.
 *
 * p_children/p_sibling requires the parent's token, and
 * changing pptr requires the child's token, so we have to
 * get three tokens to do this operation.  We also need to
 * hold pointers that might get ripped out from under us to
 * preserve structural integrity.
 *
 * It is possible to race another reparent or disconnect or other
 * similar operation.  We must retry when this situation occurs.
 * Once we successfully reparent the process we no longer care
 * about any races.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	struct proc *opp;

	PHOLD(parent);
	while ((opp = child->p_pptr) != parent) {
		PHOLD(opp);
		lwkt_gettoken(&opp->p_token);
		lwkt_gettoken(&child->p_token);
		lwkt_gettoken(&parent->p_token);
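		/*
		 * All three tokens are now held; if the parentage changed
		 * while we were acquiring them, drop everything and retry
		 * against the new opp.
		 */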
		if (child->p_pptr != opp) {
			lwkt_reltoken(&parent->p_token);
			lwkt_reltoken(&child->p_token);
			lwkt_reltoken(&opp->p_token);
			PRELE(opp);
			continue;
		}
		LIST_REMOVE(child, p_sibling);
		LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
		child->p_pptr = parent;
		lwkt_reltoken(&parent->p_token);
		lwkt_reltoken(&child->p_token);
		lwkt_reltoken(&opp->p_token);
		if (LIST_EMPTY(&opp->p_children))
			wakeup(opp);
		PRELE(opp);
		break;
	}
	PRELE(parent);
}

/*
 * The next two functions handle adding and deleting items on the
 * exit callout list.
 *
 * at_exit():
 * Take the function given and put it onto the exit callout list,
 * but first make sure that it's not already there.
 * Returns 0 on success.
 */

int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",
		    function);
#endif
	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
			return (1);
		}
	}
	return (0);
}
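
/*
 * Sketch of a typical registration (mymod_exitfn is a hypothetical
 * callback, assuming the exitlist_fn signature implied by the
 * invocation in exit1() above, which passes the exiting thread):
 *
 *	static void
 *	mymod_exitfn(struct thread *td)
 *	{
 *		(per-process cleanup for the exiting thread)
 *	}
 *
 *	at_exit(mymod_exitfn);		(e.g. at module load)
 *	rm_at_exit(mymod_exitfn);	(e.g. at module unload)
 */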

/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
	struct lwplist *lwplist = context;
	struct lwp *lp;
	int cpu = mycpuid;

	lwkt_gettoken(&deadlwp_token[cpu]);
	while ((lp = LIST_FIRST(lwplist))) {
		LIST_REMOVE(lp, u.lwp_reap_entry);
		reaplwp(lp);
	}
	lwkt_reltoken(&deadlwp_token[cpu]);
}

static void
reaplwp(struct lwp *lp)
{
	while (lwp_wait(lp) == 0)
		;
	lwp_dispose(lp);
}

static void
deadlwp_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		lwkt_token_init(&deadlwp_token[cpu], "deadlwpl");
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
					    M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
	}
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);
1298