/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 * $DragonFly: src/sys/kern/kern_fork.c,v 1.68 2007/04/29 18:25:34 dillon Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/vmmeter.h>
#include <sys/thread2.h>
#include <sys/signal2.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

/*
 * These are the structures used to create a callout list of things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags);

int forksleep; /* Place for fork1() to sleep on. */

/* ARGSUSED */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}
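
/*
 * Illustrative userland sketch (not part of this file): the sysmsg_fds
 * values above carry the parent's side of fork(2)'s two-return
 * convention; the child's 0 return is arranged on the child's side when
 * it first returns to userland.
 *
 *	pid_t pid = fork();
 *	if (pid < 0)
 *		err(1, "fork");
 *	else if (pid == 0)
 *		_exit(0);			(runs in the child)
 *	else
 *		waitpid(pid, NULL, 0);		(runs in the parent)
 */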

/* ARGSUSED */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}
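
/*
 * vfork differs from fork only in the extra RFPPWAIT | RFMEM flags: the
 * child borrows the parent's vmspace and the parent blocks (see the
 * P_PPWAIT loop in start_forked_proc()) until the child execs or exits.
 * Illustrative userland sketch:
 *
 *	pid_t pid = vfork();
 *	if (pid == 0) {
 *		execl("/bin/true", "true", (char *)NULL);
 *		_exit(127);		(reached only if the exec failed)
 *	}
 *	(the parent resumes here only after the child's exec or _exit)
 */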

/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new one, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2)
			start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2 ? p2->p_pid : 0;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}
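
/*
 * Illustrative flag combinations (userland, not part of this file):
 *
 *	rfork(RFFDG | RFPROC);		(equivalent to a plain fork)
 *	rfork(RFPROC | RFMEM);		(new process sharing our vmspace)
 *	rfork(RFPROC | RFNOWAIT);	(new process reparented to init)
 */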

int
sys_lwp_create(struct lwp_create_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	int error;

	error = copyin(uap->params, &params, sizeof(params));
	if (error)
		goto fail2;

	lp = lwp_fork(curthread->td_lwp, p, RFPROC);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid1, sizeof(lp->lwp_tid))))
		goto fail;
	if (params.tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid2, sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();

	return (0);

fail:
	--p->p_nthreads;
	LIST_REMOVE(lp, lwp_list);
	/* lwp_dispose expects an exited lwp, and a held proc */
	lp->lwp_flag |= LWP_WEXIT;
	lp->lwp_thread->td_flags |= TDF_EXITING;
	PHOLD(p);
	lwp_dispose(lp);
fail2:
	return (error);
}
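
/*
 * Rough userland sketch of the call above (illustrative; only the
 * tid1/tid2 members are referenced in this file -- the entry point,
 * argument and stack members are consumed by cpu_prepare_lwp(), see
 * sys/lwp.h for the authoritative layout):
 *
 *	struct lwp_params params;
 *	lwpid_t tid;
 *
 *	bzero(&params, sizeof(params));
 *	(fill in the entry point and stack members here)
 *	params.tid1 = &tid;		(receives the new lwp's tid)
 *	params.tid2 = NULL;
 *	lwp_create(&params);
 */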

int	nprocs = 1;		/* process 0 */

int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2, *pptr;
	struct pgrp *pgrp;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running
		 */
		if (p1->p_nthreads != 1)
			return (EINVAL);

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent.)
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				newfd = fdcopy(p1);
				fdfree(p1);
				p1->p_fd = newfd;
			}
		}
		*procp = NULL;
		return (0);
	}
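
	/*
	 * Example of the non-RFPROC path above (illustrative): a process
	 * can rework its descriptor table in place, with no child created:
	 *
	 *	rfork(RFFDG);	(unshare a descriptor table shared by rfork)
	 *	rfork(RFCFDG);	(replace the table with a fresh, empty one)
	 */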

	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	error = 0;
	pgrp = NULL;
	if ((flags & RFPGLOCK) && (pgrp = p1->p_pgrp) != NULL) {
		lockmgr(&pgrp->pg_lock, LK_SHARED);
		if (CURSIGNB(lp1)) {
			error = ERESTART;
			goto done;
		}
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit. The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = p1->p_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
			       "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(p1->p_ucred->cr_ruidinfo, 1,
		(uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count
		 */
		nprocs--;
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
			       "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/* Allocate new proc. */
	p2 = zalloc(proc_zone);
	bzero(p2, sizeof(*p2));

	/*
	 * Setup linkage for kernel based threading XXX lwp
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_leader = p2;
	}

	LIST_INIT(&p2->p_lwps);

	/*
	 * Setting the state to SIDL protects the partially initialized
	 * process once it starts getting hooked into the rest of the system.
	 */
	p2->p_stat = SIDL;
	proc_add_allproc(p2);

	/*
	 * Make a proc table entry for the new process.
	 * The whole structure was zeroed above, so copy the section that is
	 * copied directly from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(p1->p_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flag |= P_JAILED;

	if (p2->p_args)
		p2->p_args->ar_ref++;

	p2->p_usched = p1->p_usched;

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		p2->p_sigacts->ps_refcnt++;
	} else {
		p2->p_sigacts = (struct sigacts *)kmalloc(sizeof(*p2->p_sigacts),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		p2->p_sigacts->ps_refcnt = 1;
	}
	if (flags & RFLINUXTHPN)
	        p2->p_sigparent = SIGUSR1;
	else
	        p2->p_sigparent = SIGCHLD;

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/*
	 * Handle file descriptors
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		p2->p_fd = fdcopy(p1);
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol =
				filedesc_to_leader_alloc(NULL,
							 p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1->p_limit);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	if (p1->p_vkernel)
		vkernel_inherit(p1, p2);

	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	LIST_INSERT_AFTER(p1, p2, p_pglist);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_children);
	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init(&p2->p_ithandle);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);

	/*
	 * Create the first lwp associated with the new proc.
	 * It will return via a different execution path later, directly
	 * into userland, after it was put on the runq by
	 * start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags);

	/*
	 * Update the VM statistics.  Mask out RFPGLOCK (a kernel-internal
	 * interlock flag passed in by sys_fork() and sys_vfork()) so that
	 * plain forks and vforks are classified correctly.
	 */
	if ((flags & ~RFPGLOCK) == (RFFDG | RFPROC)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
					     p2->p_vmspace->vm_ssize;
	} else if ((flags & ~RFPGLOCK) ==
		   (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
						p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;

	/*
	 * tell any interested parties about the new process
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
done:
	if (pgrp)
		lockmgr(&pgrp->pg_lock, LK_RELEASE);
	return (error);
}

static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
{
	struct lwp *lp;
	struct thread *td;
	lwpid_t tid;

	/*
	 * We need to prevent wrap-around collisions.
	 * Until we have a nice tid allocator, we need to
	 * start searching for free tids once we wrap around.
	 *
	 * XXX give me a nicer allocator
	 */
	if (destproc->p_lasttid + 1 <= 0) {
		tid = 0;
restart:
		FOREACH_LWP_IN_PROC(lp, destproc) {
			if (lp->lwp_tid != tid)
				continue;
			/* tids match, search next. */
			tid++;
			/*
			 * Wait -- the whole tid space is depleted?
			 * Impossible.
			 */
			if (tid <= 0)
				panic("lwp_fork: All tids depleted?!");
			goto restart;
		}
		/* When we come here, the tid is not occupied */
	} else {
		tid = destproc->p_lasttid++;
	}
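
	/*
	 * Illustrative trace of the wrap-around search above: with
	 * existing tids {0, 1, 3} the scan bumps the candidate from 0
	 * to 1 to 2, finds no lwp using 2, and settles on it.
	 */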

	lp = zalloc(lwp_zone);
	bzero(lp, sizeof(*lp));
	lp->lwp_proc = destproc;
	lp->lwp_tid = tid;
	LIST_INSERT_HEAD(&destproc->p_lwps, lp, lwp_list);
	destproc->p_nthreads++;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	    (unsigned) ((caddr_t)&lp->lwp_endcopy -
			(caddr_t)&lp->lwp_startcopy));
	lp->lwp_flag |= origlp->lwp_flag & LWP_ALTSTACK;
	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = mycpu->gd_schedclock.time -
			mycpu->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();

	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1, 0);
	lp->lwp_thread = td;
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
#ifdef SMP
	KKASSERT(td->td_mpcount == 1);
#endif
	lwkt_setpri(td, TDPRI_KERN_USER);
	lwkt_set_comm(td, "%s", destproc->p_comm);

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	caps_fork(origlp->lwp_thread, lp->lwp_thread);

	return (lp);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * however, first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
		    function);
	}
#endif
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return(1);
		}
	}
	return (0);
}
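
/*
 * Illustrative registration sketch (hypothetical module code, not part
 * of this file; "my_fork_hook" is an invented name):
 *
 *	static void
 *	my_fork_hook(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		kprintf("pid %d forked pid %d\n", p1->p_pid, p2->p_pid);
 *	}
 *
 *	at_fork(my_fork_hook);		(typically at module load)
 *	rm_at_fork(my_fork_hook);	(typically at module unload)
 */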

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);

	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	while (p2->p_flag & P_PPWAIT)
		tsleep(lp1->lwp_proc, 0, "ppwait", 0);
}
699