/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)kern_fork.c	7.25 (Berkeley) 03/17/91
 */

#include "param.h"
#include "systm.h"
#include "map.h"
#include "filedesc.h"
#include "kernel.h"
#include "malloc.h"
#include "proc.h"
#include "resourcevar.h"
#include "vnode.h"
#include "seg.h"
#include "file.h"
#include "acct.h"
#include "ktrace.h"

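/*
 * fork and vfork are thin system-call entries that differ only in the
 * flag they pass to fork1: a nonzero second argument requests vfork
 * semantics, i.e. the child borrows the parent's address space (see
 * the vm_fork call below) and the parent sleeps until the child execs
 * or exits.
 */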
/* ARGSUSED */
fork(p, uap, retval)
	struct proc *p;
	struct args *uap;
	int retval[];
{

	return (fork1(p, 0, retval));
}

/* ARGSUSED */
vfork(p, uap, retval)
	struct proc *p;
	struct args *uap;
	int retval[];
{

	return (fork1(p, 1, retval));
}

int	nprocs = 1;		/* process 0 */

fork1(p1, isvfork, retval)
	register struct proc *p1;
	int isvfork, retval[];
{
	register struct proc *p2;
	register int count, uid;
	static int nextpid, pidchecked = 0;

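	/*
	 * Count the processes this uid already owns, on both the active
	 * and zombie lists (zombies still hold a slot until they are
	 * reaped), for the RLIMIT_NPROC check below.  Root is exempt,
	 * so the count is skipped for uid 0.
	 */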
	count = 0;
	if ((uid = p1->p_ucred->cr_uid) != 0) {
		for (p2 = allproc; p2; p2 = p2->p_nxt)
			if (p2->p_ucred->cr_uid == uid)
				count++;
		for (p2 = zombproc; p2; p2 = p2->p_nxt)
			if (p2->p_ucred->cr_uid == uid)
				count++;
	}
	/*
	 * Although process entries are dynamically allocated,
	 * we still keep a global limit on the maximum number
	 * we will create.  Don't allow a nonprivileged user
	 * to exceed its current limit or to bring us within one
	 * of the global limit; don't let root exceed the limit.
	 * nprocs is the current number of processes,
	 * maxproc is the limit.
	 */
	retval[1] = 0;
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
		tablefull("proc");
		return (EAGAIN);
	}
	if (count >= p1->p_rlimit[RLIMIT_NPROC].rlim_cur)
		return (EAGAIN);

	/*
	 * Find an unused process ID.
	 * We remember a range of unused IDs ready to use
	 * (from nextpid+1 through pidchecked-1).
	 */
	nextpid++;
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (nextpid >= PID_MAX) {
		nextpid = 100;
		pidchecked = 0;
	}
	if (nextpid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than nextpid, so we can avoid checking for a while.
		 */
		p2 = allproc;
again:
		for (; p2 != NULL; p2 = p2->p_nxt) {
			if (p2->p_pid == nextpid ||
			    p2->p_pgrp->pg_id == nextpid) {
				nextpid++;
				if (nextpid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp->pg_id > nextpid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = zombproc;
			goto again;
		}
	}
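	/*
	 * Note that the scan above need not restart when a collision
	 * merely bumps nextpid: if some proc already scanned holds the
	 * new value as its pid (or pgrp id), that value was greater than
	 * nextpid when that proc was scanned, so it has already pulled
	 * pidchecked down to it, and the nextpid >= pidchecked test
	 * forces a full retry.  An in-use pid can never be skipped.
	 */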

	/*
	 * Allocate new proc.
	 * Link onto allproc (this should probably be delayed).
	 */
	MALLOC(p2, struct proc *, sizeof(struct proc), M_PROC, M_WAITOK);
	nprocs++;
	p2->p_nxt = allproc;
	p2->p_nxt->p_prev = &p2->p_nxt;		/* allproc is never NULL */
	p2->p_prev = &allproc;
	allproc = p2;
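	/*
	 * p_prev is not a pointer to the previous proc but the address
	 * of the predecessor's p_nxt field (or of allproc itself), so
	 * unlinking needs no special case for the list head:
	 *	*p->p_prev = p->p_nxt;
	 *	if (p->p_nxt)
	 *		p->p_nxt->p_prev = p->p_prev;
	 */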
	p2->p_link = NULL;			/* shouldn't be necessary */
	p2->p_rlink = NULL;			/* shouldn't be necessary */

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
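	/*
	 * (p_startzero/p_endzero and p_startcopy/p_endcopy are defined
	 * in proc.h to bracket contiguous runs of struct proc members,
	 * so each run can be cleared or copied with a single operation;
	 * any new member must be placed in the appropriate run.)
	 */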

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred),
	    M_SUBPROC, M_WAITOK);
	bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
	crhold(p1->p_ucred);

	p2->p_fd = fdcopy(p1);
	p2->p_stats = p1->p_stats;		/* XXX move; in u. */
	/*
	 * If p_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD)
		p2->p_limit = limcopy(p1->p_limit);
	else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}
	p2->p_sigacts = p1->p_sigacts;		/* XXX move; in u. */

	p2->p_flag = SLOAD | (p1->p_flag & (SPAGV|SHPUX));
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & SCTTY)
		p2->p_flag |= SCTTY;
	if (isvfork)
		p2->p_flag |= SPPWAIT;
	p2->p_stat = SIDL;
	p2->p_pid = nextpid;
	{
	struct proc **hash = &pidhash[PIDHASH(p2->p_pid)];

	p2->p_hash = *hash;
	*hash = p2;
	}
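	/*
	 * The new proc is now on its pid hash chain, so pid lookups
	 * (e.g. pfind()) can find it without walking all of allproc.
	 */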
	p2->p_pgrpnxt = p1->p_pgrpnxt;
	p1->p_pgrpnxt = p2;
	p2->p_pptr = p1;
	p2->p_osptr = p1->p_cptr;
	if (p1->p_cptr)
		p1->p_cptr->p_ysptr = p2;
	p1->p_cptr = p2;
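	/*
	 * Family links: p_cptr points at the parent's youngest child,
	 * and children are chained through older/younger sibling
	 * pointers (p_osptr/p_ysptr); the new proc is pushed at the
	 * head of the parent's child list.
	 */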
#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	p2->p_regs = p1->p_regs;		/* XXX move this */
#if defined(tahoe)
	p2->p_vmspace->p_ckey = p1->p_vmspace->p_ckey; /* XXX move this */
#endif

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	p1->p_flag |= SKEEP;
	if (vm_fork(p1, p2, isvfork)) {
		/*
		 * Child process.  Set start time, return parent pid,
		 * and mark as child in retval[1].
		 */
		(void) splclock();
		p2->p_stats->p_start = time;
		(void) spl0();
		retval[0] = p1->p_pid;
		retval[1] = 1;
		p2->p_acflag = AFORK;
		return (0);
	}
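	/*
	 * vm_fork returns nonzero only in the child, which resumes
	 * above on its own kernel stack.  retval[1] is the child flag:
	 * the user-level fork() stub conventionally does something like
	 *	return (rval1 ? 0 : rval0);
	 * so the child sees 0 and the parent sees the child's pid.
	 */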

	/*
	 * Make child runnable and add to run queue.
	 */
	(void) splhigh();
	p2->p_stat = SRUN;
	setrq(p2);
	(void) spl0();

	/*
	 * Now can be swapped.
	 */
	p1->p_flag &= ~SKEEP;

	/*
	 * XXX preserve synchronization semantics of vfork
	 * If waiting for child to exec or exit, set SPPWAIT
	 * on child, and sleep on our proc (in case of exit).
	 */
	if (isvfork)
		while (p2->p_flag & SPPWAIT)
			sleep((caddr_t)p1, PZERO - 1);
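	/*
	 * SPPWAIT is cleared, and the parent woken, by the child in
	 * exec and in exit, so this loop ends as soon as the child has
	 * given back the borrowed address space.
	 */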

	/*
	 * Return child pid to parent process.
	 * retval[1] was set above.
	 */
	retval[0] = p2->p_pid;
	return (0);
}