xref: /original-bsd/sys/kern/kern_fork.c (revision de3f5c4e)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)kern_fork.c	7.29 (Berkeley) 05/15/91
8  */
9 
10 #include "param.h"
11 #include "systm.h"
12 #include "map.h"
13 #include "filedesc.h"
14 #include "kernel.h"
15 #include "malloc.h"
16 #include "proc.h"
17 #include "resourcevar.h"
18 #include "vnode.h"
19 #include "seg.h"
20 #include "file.h"
21 #include "acct.h"
22 #include "ktrace.h"
23 
/*
 * fork() system call.
 * All of the work is done in fork1(); isvfork == 0 selects normal
 * fork semantics.  fork1() fills in retval[] for both parent and child.
 */
/* ARGSUSED */
fork(curp, uap, retval)
	struct proc *curp;
	void *uap;
	int retval[];
{

	return (fork1(curp, 0, retval));
}
33 
/*
 * vfork() system call.
 * Identical to fork() except that fork1() is called with isvfork == 1,
 * which requests vfork semantics: SPPWAIT is set on the child and the
 * parent sleeps until the child execs or exits (see fork1()).
 */
/* ARGSUSED */
vfork(curp, uap, retval)
	struct proc *curp;
	void *uap;
	int retval[];
{

	return (fork1(curp, 1, retval));
}
43 
44 int	nprocs = 1;		/* current number of processes; boot starts with process 0 */
45 
/*
 * fork1 -- common code for the fork() and vfork() system calls.
 *
 * p1:      parent process (the caller).
 * isvfork: non-zero for vfork semantics (SPPWAIT; see below).
 * retval:  retval[0] receives the other process's pid;
 *          retval[1] is 1 in the child, 0 in the parent.
 *
 * Returns 0 on success, EAGAIN if a process limit would be exceeded.
 */
46 fork1(p1, isvfork, retval)
47 	register struct proc *p1;
48 	int isvfork, retval[];
49 {
50 	register struct proc *p2;
51 	register int count, uid;
	/* pid-allocation state persists across calls; see the retry loop. */
52 	static int nextpid, pidchecked = 0;
53 
	/*
	 * Count this (non-root) user's existing processes, both active
	 * and zombie, for the RLIMIT_NPROC check below.
	 */
54 	count = 0;
55 	if ((uid = p1->p_ucred->cr_uid) != 0) {
56 		for (p2 = allproc; p2; p2 = p2->p_nxt)
57 			if (p2->p_ucred->cr_uid == uid)
58 				count++;
59 		for (p2 = zombproc; p2; p2 = p2->p_nxt)
60 			if (p2->p_ucred->cr_uid == uid)
61 				count++;
62 	}
63 	/*
64 	 * Although process entries are dynamically allocated,
65 	 * we still keep a global limit on the maximum number
66 	 * we will create.  Don't allow a nonprivileged user
67 	 * to exceed its current limit or to bring us within one
68 	 * of the global limit; don't let root exceed the limit.
69 	 * nprocs is the current number of processes,
70 	 * maxproc is the limit.
	 *
	 * NOTE(review): the test below does not implement the policy
	 * described above -- the "uid == 0 && nprocs >= maxproc + 1"
	 * clause can never be reached (the first clause already fired),
	 * and a nonprivileged user is not in fact held one below the
	 * global limit.  Confirm intent before changing; a form such as
	 * (nprocs >= maxproc || (uid != 0 && nprocs >= maxproc - 1))
	 * would match the comment.
71 	 */
72 	if (nprocs >= maxproc || uid == 0 && nprocs >= maxproc + 1) {
73 		tablefull("proc");
74 		return (EAGAIN);
75 	}
	/*
	 * Per-user limit; count excludes the process being created.
	 * NOTE(review): ">" lets a user sitting exactly at rlim_cur
	 * create one more process -- verify whether that is intended.
	 */
76 	if (count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)
77 		return (EAGAIN);
78 
79 	/*
80 	 * Find an unused process ID.
81 	 * We remember a range of unused IDs ready to use
82 	 * (from nextpid+1 through pidchecked-1).
83 	 */
84 	nextpid++;
85 retry:
86 	/*
87 	 * If the process ID prototype has wrapped around,
88 	 * restart somewhat above 0, as the low-numbered procs
89 	 * tend to include daemons that don't exit.
90 	 */
91 	if (nextpid >= PID_MAX) {
92 		nextpid = 100;
93 		pidchecked = 0;
94 	}
95 	if (nextpid >= pidchecked) {
96 		int doingzomb = 0;
97 
98 		pidchecked = PID_MAX;
99 		/*
100 		 * Scan the active and zombie procs to check whether this pid
101 		 * is in use.  Remember the lowest pid that's greater
102 		 * than nextpid, so we can avoid checking for a while.
		 * A pid collides if it matches either a process ID or a
		 * process-group ID of any existing (or zombie) process.
103 		 */
104 		p2 = allproc;
105 again:
106 		for (; p2 != NULL; p2 = p2->p_nxt) {
107 			if (p2->p_pid == nextpid ||
108 			    p2->p_pgrp->pg_id == nextpid) {
109 				nextpid++;
110 				if (nextpid >= pidchecked)
111 					goto retry;
112 			}
113 			if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
114 				pidchecked = p2->p_pid;
115 			if (p2->p_pgrp->pg_id > nextpid &&
116 			    pidchecked > p2->p_pgrp->pg_id)
117 				pidchecked = p2->p_pgrp->pg_id;
118 		}
		/* Run the same scan once more over the zombie list. */
119 		if (!doingzomb) {
120 			doingzomb = 1;
121 			p2 = zombproc;
122 			goto again;
123 		}
124 	}
125 
126 
127 	/*
128 	 * Allocate new proc.
129 	 * Link onto allproc (this should probably be delayed).
130 	 */
131 	MALLOC(p2, struct proc *, sizeof(struct proc), M_PROC, M_WAITOK);
132 	nprocs++;
133 	p2->p_nxt = allproc;
134 	p2->p_nxt->p_prev = &p2->p_nxt;		/* allproc is never NULL */
135 	p2->p_prev = &allproc;
136 	allproc = p2;
137 	p2->p_link = NULL;			/* shouldn't be necessary */
138 	p2->p_rlink = NULL;			/* shouldn't be necessary */
139 
140 	/*
141 	 * Make a proc table entry for the new process.
142 	 * Start by zeroing the section of proc that is zero-initialized,
143 	 * then copy the section that is copied directly from the parent.
	 * The startzero/endzero and startcopy/endcopy markers are fields
	 * declared in struct proc delimiting those ranges.
144 	 */
145 	bzero(&p2->p_startzero,
146 	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
147 	bcopy(&p1->p_startcopy, &p2->p_startcopy,
148 	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
149 	p2->p_spare[0] = 0;	/* XXX - should be in zero range */
150 	p2->p_spare[1] = 0;	/* XXX - should be in zero range */
151 	p2->p_spare[2] = 0;	/* XXX - should be in zero range */
152 	p2->p_spare[3] = 0;	/* XXX - should be in zero range */
153 
154 	/*
155 	 * Duplicate sub-structures as needed.
156 	 * Increase reference counts on shared objects.
157 	 * The p_stats and p_sigacts substructs are set in vm_fork.
158 	 */
159 	MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred),
160 	    M_SUBPROC, M_WAITOK);
161 	bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
162 	p2->p_cred->p_refcnt = 1;
163 	crhold(p1->p_ucred);
164 
	/* Child gets its own copy of the file-descriptor table. */
165 	p2->p_fd = fdcopy(p1);
166 	/*
167 	 * If p_limit is still copy-on-write, bump refcnt,
168 	 * otherwise get a copy that won't be modified.
169 	 * (If PL_SHAREMOD is clear, the structure is shared
170 	 * copy-on-write.)
171 	 */
172 	if (p1->p_limit->p_lflags & PL_SHAREMOD)
173 		p2->p_limit = limcopy(p1->p_limit);
174 	else {
175 		p2->p_limit = p1->p_limit;
176 		p2->p_limit->p_refcnt++;
177 	}
178 
	/* Child starts loaded (SLOAD); inherit HP-UX emulation flag only. */
179 	p2->p_flag = SLOAD | (p1->p_flag & SHPUX);
180 	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & SCTTY)
181 		p2->p_flag |= SCTTY;
182 	if (isvfork)
183 		p2->p_flag |= SPPWAIT;
184 	p2->p_stat = SIDL;
185 	p2->p_pid = nextpid;
	/* Enter the new pid into the pid hash chain. */
186 	{
187 	struct proc **hash = &pidhash[PIDHASH(p2->p_pid)];
188 
189 	p2->p_hash = *hash;
190 	*hash = p2;
191 	}
	/* Link into the parent's process group and process tree. */
192 	p2->p_pgrpnxt = p1->p_pgrpnxt;
193 	p1->p_pgrpnxt = p2;
194 	p2->p_pptr = p1;
195 	p2->p_osptr = p1->p_cptr;
196 	if (p1->p_cptr)
197 		p1->p_cptr->p_ysptr = p2;
198 	p1->p_cptr = p2;
199 #ifdef KTRACE
200 	/*
201 	 * Copy traceflag and tracefile if enabled.
202 	 * If not inherited, these were zeroed above.
203 	 */
204 	if (p1->p_traceflag&KTRFAC_INHERIT) {
205 		p2->p_traceflag = p1->p_traceflag;
206 		if ((p2->p_tracep = p1->p_tracep) != NULL)
207 			VREF(p2->p_tracep);
208 	}
209 #endif
210 
211 #if defined(tahoe)
212 	p2->p_vmspace->p_ckey = p1->p_vmspace->p_ckey; /* XXX move this */
213 #endif
214 
215 	/*
216 	 * This begins the section where we must prevent the parent
217 	 * from being swapped.
218 	 */
219 	p1->p_flag |= SKEEP;
220 	/*
221 	 * Set return values for child before vm_fork,
222 	 * so they can be copied to child stack.
223 	 * We return parent pid, and mark as child in retval[1].
224 	 * NOTE: the kernel stack may be at a different location in the child
225 	 * process, and thus addresses of automatic variables (including retval)
226 	 * may be invalid after vm_fork returns in the child process.
227 	 */
228 	retval[0] = p1->p_pid;
229 	retval[1] = 1;
	/* vm_fork returns non-zero in the child, zero in the parent. */
230 	if (vm_fork(p1, p2, isvfork)) {
231 		/*
232 		 * Child process.  Set start time and get to work.
233 		 */
234 		(void) splclock();
235 		p2->p_stats->p_start = time;
236 		(void) spl0();
237 		p2->p_acflag = AFORK;
238 		return (0);
239 	}
240 
241 	/*
242 	 * Make child runnable and add to run queue.
243 	 */
244 	(void) splhigh();
245 	p2->p_stat = SRUN;
246 	setrq(p2);
247 	(void) spl0();
248 
249 	/*
250 	 * Now can be swapped.
251 	 */
252 	p1->p_flag &= ~SKEEP;
253 
254 	/*
255 	 * Preserve synchronization semantics of vfork.
256 	 * If waiting for child to exec or exit, set SPPWAIT
257 	 * on child, and sleep on our proc (in case of exit).
	 * (SPPWAIT was set above; presumably exec/exit clears it and
	 * wakes us -- that code is elsewhere.)
258 	 */
259 	if (isvfork)
260 		while (p2->p_flag & SPPWAIT)
261 			tsleep((caddr_t)p1, PWAIT, "ppwait", 0);
262 
263 	/*
264 	 * Return child pid to parent process,
265 	 * marking us as parent via retval[1].
266 	 */
267 	retval[0] = p2->p_pid;
268 	retval[1] = 0;
269 	return (0);
270 }
271