xref: /original-bsd/sys/kern/kern_fork.c (revision cd89438c)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)kern_fork.c	7.26 (Berkeley) 04/20/91
8  */
9 
10 #include "param.h"
11 #include "systm.h"
12 #include "map.h"
13 #include "filedesc.h"
14 #include "kernel.h"
15 #include "malloc.h"
16 #include "proc.h"
17 #include "resourcevar.h"
18 #include "vnode.h"
19 #include "seg.h"
20 #include "file.h"
21 #include "acct.h"
22 #include "ktrace.h"
23 
24 /* ARGSUSED */
/*
 * fork system call: create a new process.
 * All work is done by fork1() with isvfork == 0, so parent and
 * child run independently (no address-space sharing, no waiting).
 * Returns 0 on success or an errno (EAGAIN) from fork1().
 */
fork(p, uap, retval)
	struct proc *p;
	struct args *uap;	/* unused; present for syscall calling convention */
	int retval[];
{

	return (fork1(p, 0, retval));
}
33 
34 /* ARGSUSED */
/*
 * vfork system call: create a new process with vfork semantics.
 * Delegates to fork1() with isvfork == 1; fork1 marks the child
 * SPPWAIT and makes the parent sleep until the child exits or execs.
 * Returns 0 on success or an errno (EAGAIN) from fork1().
 */
vfork(p, uap, retval)
	struct proc *p;
	struct args *uap;	/* unused; present for syscall calling convention */
	int retval[];
{

	return (fork1(p, 1, retval));
}
43 
int	nprocs = 1;		/* current number of processes; starts at 1 for process 0 */
45 
46 fork1(p1, isvfork, retval)
47 	register struct proc *p1;
48 	int isvfork, retval[];
49 {
50 	register struct proc *p2;
51 	register int count, uid;
52 	static int nextpid, pidchecked = 0;
53 
54 	count = 0;
55 	if ((uid = p1->p_ucred->cr_uid) != 0) {
56 		for (p2 = allproc; p2; p2 = p2->p_nxt)
57 			if (p2->p_ucred->cr_uid == uid)
58 				count++;
59 		for (p2 = zombproc; p2; p2 = p2->p_nxt)
60 			if (p2->p_ucred->cr_uid == uid)
61 				count++;
62 	}
63 	/*
64 	 * Although process entries are dynamically entries,
65 	 * we still keep a global limit on the maximum number
66 	 * we will create.  Don't allow a nonprivileged user
67 	 * to exceed its current limit or to bring us within one
68 	 * of the global limit; don't let root exceed the limit.
69 	 * nprocs is the current number of processes,
70 	 * maxproc is the limit.
71 	 */
72 	if (nprocs >= maxproc || uid == 0 && nprocs >= maxproc + 1) {
73 		tablefull("proc");
74 		return (EAGAIN);
75 	}
76 	if (count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)
77 		return (EAGAIN);
78 
79 	/*
80 	 * Find an unused process ID.
81 	 * We remember a range of unused IDs ready to use
82 	 * (from nextpid+1 through pidchecked-1).
83 	 */
84 	nextpid++;
85 retry:
86 	/*
87 	 * If the process ID prototype has wrapped around,
88 	 * restart somewhat above 0, as the low-numbered procs
89 	 * tend to include daemons that don't exit.
90 	 */
91 	if (nextpid >= PID_MAX) {
92 		nextpid = 100;
93 		pidchecked = 0;
94 	}
95 	if (nextpid >= pidchecked) {
96 		int doingzomb = 0;
97 
98 		pidchecked = PID_MAX;
99 		/*
100 		 * Scan the active and zombie procs to check whether this pid
101 		 * is in use.  Remember the lowest pid that's greater
102 		 * than nextpid, so we can avoid checking for a while.
103 		 */
104 		p2 = allproc;
105 again:
106 		for (; p2 != NULL; p2 = p2->p_nxt) {
107 			if (p2->p_pid == nextpid ||
108 			    p2->p_pgrp->pg_id == nextpid) {
109 				nextpid++;
110 				if (nextpid >= pidchecked)
111 					goto retry;
112 			}
113 			if (p2->p_pid > nextpid && pidchecked > p2->p_pid)
114 				pidchecked = p2->p_pid;
115 			if (p2->p_pgrp->pg_id > nextpid &&
116 			    pidchecked > p2->p_pgrp->pg_id)
117 				pidchecked = p2->p_pgrp->pg_id;
118 		}
119 		if (!doingzomb) {
120 			doingzomb = 1;
121 			p2 = zombproc;
122 			goto again;
123 		}
124 	}
125 
126 
127 	/*
128 	 * Allocate new proc.
129 	 * Link onto allproc (this should probably be delayed).
130 	 */
131 	MALLOC(p2, struct proc *, sizeof(struct proc), M_PROC, M_WAITOK);
132 	nprocs++;
133 	p2->p_nxt = allproc;
134 	p2->p_nxt->p_prev = &p2->p_nxt;		/* allproc is never NULL */
135 	p2->p_prev = &allproc;
136 	allproc = p2;
137 	p2->p_link = NULL;			/* shouldn't be necessary */
138 	p2->p_rlink = NULL;			/* shouldn't be necessary */
139 
140 	/*
141 	 * Make a proc table entry for the new process.
142 	 * Start by zeroing the section of proc that is zero-initialized,
143 	 * then copy the section that is copied directly from the parent.
144 	 */
145 	bzero(&p2->p_startzero,
146 	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
147 	bcopy(&p1->p_startcopy, &p2->p_startcopy,
148 	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
149 
150 	/*
151 	 * Duplicate sub-structures as needed.
152 	 * Increase reference counts on shared objects.
153 	 * The p_stats and p_sigacts substructs are set in vm_fork.
154 	 */
155 	MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred),
156 	    M_SUBPROC, M_WAITOK);
157 	bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
158 	crhold(p1->p_ucred);
159 
160 	p2->p_fd = fdcopy(p1);
161 	/*
162 	 * If p_limit is still copy-on-write, bump refcnt,
163 	 * otherwise get a copy that won't be modified.
164 	 * (If PL_SHAREMOD is clear, the structure is shared
165 	 * copy-on-write.)
166 	 */
167 	if (p1->p_limit->p_lflags & PL_SHAREMOD)
168 		p2->p_limit = limcopy(p1->p_limit);
169 	else {
170 		p2->p_limit = p1->p_limit;
171 		p2->p_limit->p_refcnt++;
172 	}
173 
174 	p2->p_flag = SLOAD | (p1->p_flag & SHPUX);
175 	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & SCTTY)
176 		p2->p_flag |= SCTTY;
177 	if (isvfork)
178 		p2->p_flag |= SPPWAIT;
179 	p2->p_stat = SIDL;
180 	p2->p_pid = nextpid;
181 	{
182 	struct proc **hash = &pidhash[PIDHASH(p2->p_pid)];
183 
184 	p2->p_hash = *hash;
185 	*hash = p2;
186 	}
187 	p2->p_pgrpnxt = p1->p_pgrpnxt;
188 	p1->p_pgrpnxt = p2;
189 	p2->p_pptr = p1;
190 	p2->p_osptr = p1->p_cptr;
191 	if (p1->p_cptr)
192 		p1->p_cptr->p_ysptr = p2;
193 	p1->p_cptr = p2;
194 #ifdef KTRACE
195 	/*
196 	 * Copy traceflag and tracefile if enabled.
197 	 * If not inherited, these were zeroed above.
198 	 */
199 	if (p1->p_traceflag&KTRFAC_INHERIT) {
200 		p2->p_traceflag = p1->p_traceflag;
201 		if ((p2->p_tracep = p1->p_tracep) != NULL)
202 			VREF(p2->p_tracep);
203 	}
204 #endif
205 
206 	p2->p_regs = p1->p_regs;		 /* XXX move this */
207 #if defined(tahoe)
208 	p2->p_vmspace->p_ckey = p1->p_vmspace->p_ckey; /* XXX move this */
209 #endif
210 
211 	/*
212 	 * This begins the section where we must prevent the parent
213 	 * from being swapped.
214 	 */
215 	p1->p_flag |= SKEEP;
216 	/*
217 	 * Set return values for child before vm_fork,
218 	 * so they can be copied to child stack.
219 	 * We return parent pid, and mark as child in retval[1].
220 	 */
221 	retval[0] = p1->p_pid;
222 	retval[1] = 1;
223 	if (vm_fork(p1, p2, isvfork)) {
224 		/*
225 		 * Child process.  Set start time and get to work.
226 		 */
227 		(void) splclock();
228 		p2->p_stats->p_start = time;
229 		(void) spl0();
230 		p2->p_acflag = AFORK;
231 		return (0);
232 	}
233 
234 	/*
235 	 * Make child runnable and add to run queue.
236 	 */
237 	(void) splhigh();
238 	p2->p_stat = SRUN;
239 	setrq(p2);
240 	(void) spl0();
241 
242 	/*
243 	 * Now can be swapped.
244 	 */
245 	p1->p_flag &= ~SKEEP;
246 
247 	/*
248 	 * XXX preserve synchronization semantics of vfork
249 	 * If waiting for child to exec or exit, set SPPWAIT
250 	 * on child, and sleep on our proc (in case of exit).
251 	 */
252 	if (isvfork)
253 		while (p2->p_flag & SPPWAIT)
254 			tsleep((caddr_t)p1, PWAIT, "ppwait", 0);
255 
256 	/*
257 	 * Return child pid to parent process,
258 	 * marking us as parent via retval[1].
259 	 */
260 	retval[0] = p2->p_pid;
261 	retval[1] = 0;
262 	return (0);
263 }
264