1 /* $OpenBSD: kern_fork.c,v 1.260 2024/06/03 12:48:25 claudio Exp $ */
2 /* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
3
4 /*
5 * Copyright (c) 1982, 1986, 1989, 1991, 1993
6 * The Regents of the University of California. All rights reserved.
7 * (c) UNIX System Laboratories, Inc.
8 * All or some portions of this file are derived from material licensed
9 * to the University of California by American Telephone and Telegraph
10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11 * the permission of UNIX System Laboratories, Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94
38 */
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/filedesc.h>
43 #include <sys/malloc.h>
44 #include <sys/mount.h>
45 #include <sys/proc.h>
46 #include <sys/resourcevar.h>
47 #include <sys/signalvar.h>
48 #include <sys/vnode.h>
49 #include <sys/vmmeter.h>
50 #include <sys/acct.h>
51 #include <sys/ktrace.h>
52 #include <sys/sched.h>
53 #include <sys/smr.h>
54 #include <sys/sysctl.h>
55 #include <sys/pool.h>
56 #include <sys/mman.h>
57 #include <sys/ptrace.h>
58 #include <sys/atomic.h>
59 #include <sys/unistd.h>
60 #include <sys/tracepoint.h>
61
62 #include <sys/syscallargs.h>
63
64 #include <uvm/uvm.h>
65 #include <machine/tcb.h>
66
int nprocesses = 1;		/* process 0 */
int nthreads = 1;		/* proc 0 */
struct forkstat forkstat;	/* fork/vfork/kthread/tfork counters and sizes */

/* local prototypes */
void fork_return(void *);
pid_t alloctid(void);
pid_t allocpid(void);
int ispidtaken(pid_t);

/* implemented elsewhere; duplicates parent's unveil state into the child */
void unveil_copy(struct process *parent, struct process *child);

struct proc *thread_new(struct proc *_parent, vaddr_t _uaddr);
struct process *process_new(struct proc *, struct process *, int);
int fork_check_maxthread(uid_t _uid);
81
/*
 * Entry point run by a newly forked child when the parent is traced
 * with PTRACE_FORK (see sys_fork): raise SIGTRAP in the child so the
 * debugger observes the fork, then take the normal return-to-user path.
 */
void
fork_return(void *arg)
{
	struct proc *p = (struct proc *)arg;

	if (p->p_p->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	child_return(p);
}
92
93 int
sys_fork(struct proc * p,void * v,register_t * retval)94 sys_fork(struct proc *p, void *v, register_t *retval)
95 {
96 void (*func)(void *) = child_return;
97 int flags;
98
99 flags = FORK_FORK;
100 if (p->p_p->ps_ptmask & PTRACE_FORK) {
101 flags |= FORK_PTRACE;
102 func = fork_return;
103 }
104 return fork1(p, flags, func, NULL, retval, NULL);
105 }
106
107 int
sys_vfork(struct proc * p,void * v,register_t * retval)108 sys_vfork(struct proc *p, void *v, register_t *retval)
109 {
110 return fork1(p, FORK_VFORK|FORK_PPWAIT, child_return, NULL,
111 retval, NULL);
112 }
113
/*
 * __tfork(2) system call: create an additional thread in the calling
 * process.  Userland passes a struct __tfork that may be shorter than
 * the kernel's current definition (psize <= sizeof(param)); the tail
 * left uncopied stays zero-initialized for forward compatibility.
 */
int
sys___tfork(struct proc *p, void *v, register_t *retval)
{
	struct sys___tfork_args /* {
		syscallarg(const struct __tfork *) param;
		syscallarg(size_t) psize;
	} */ *uap = v;
	size_t psize = SCARG(uap, psize);
	struct __tfork param = { 0 };
	int error;

	if (psize == 0 || psize > sizeof(param))
		return EINVAL;
	if ((error = copyin(SCARG(uap, param), &param, psize)))
		return error;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrstruct(p, "tfork", &param, sizeof(param));
#endif
#ifdef TCB_INVALID
	/* let the MD layer reject TCB pointers it considers invalid */
	if (TCB_INVALID(param.tf_tcb))
		return EINVAL;
#endif /* TCB_INVALID */

	return thread_fork(p, param.tf_stack, param.tf_tcb, param.tf_tid,
	    retval);
}
141
142 /*
143 * Allocate and initialize a thread (proc) structure, given the parent thread.
144 */
/*
 * Allocate and initialize a thread (proc) structure, given the parent thread.
 * The thread is returned in SIDL state so nothing else touches it until
 * setup completes; `uaddr' becomes its user area (kernel stack/pcb).
 * A reference on the parent's credentials (copied by the startcopy memcpy)
 * is taken before returning.
 */
struct proc *
thread_new(struct proc *parent, vaddr_t uaddr)
{
	struct proc *p;

	p = pool_get(&proc_pool, PR_WAITOK);
	p->p_stat = SIDL;			/* protect against others */
	p->p_runpri = 0;
	p->p_flag = 0;

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 * The p_startzero/p_endzero and p_startcopy/p_endcopy markers
	 * delimit those sections in struct proc.
	 */
	memset(&p->p_startzero, 0,
	    (caddr_t)&p->p_endzero - (caddr_t)&p->p_startzero);
	memcpy(&p->p_startcopy, &parent->p_startcopy,
	    (caddr_t)&p->p_endcopy - (caddr_t)&p->p_startcopy);
	/* p_ucred was duplicated from the parent by the memcpy above */
	crhold(p->p_ucred);
	p->p_addr = (struct user *)uaddr;

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p->p_sleep_to, endtsleep, p);

	return p;
}
174
175 /*
176 * Initialize common bits of a process structure, given the initial thread.
177 */
/*
 * Initialize common bits of a process structure, given the initial thread.
 * Links `p' in as the main (and only) thread, shares its credentials with
 * the process, and sets up the per-process lists, locks, and timeouts.
 */
void
process_initialize(struct process *pr, struct proc *p)
{
	/* initialize the thread links */
	pr->ps_mainproc = p;
	TAILQ_INIT(&pr->ps_threads);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
	pr->ps_threadcnt = 1;
	p->p_p = pr;

	/* give the process the same creds as the initial thread */
	pr->ps_ucred = p->p_ucred;
	crhold(pr->ps_ucred);
	/* new thread and new process each hold a reference */
	KASSERT(p->p_ucred->cr_refcnt.r_refs >= 2);

	LIST_INIT(&pr->ps_children);
	LIST_INIT(&pr->ps_orphans);
	LIST_INIT(&pr->ps_ftlist);
	LIST_INIT(&pr->ps_sigiolst);
	TAILQ_INIT(&pr->ps_tslpqueue);

	rw_init(&pr->ps_lock, "pslock");
	mtx_init(&pr->ps_mtx, IPL_HIGH);

	/* the real interval timer runs against the uptime clock */
	timeout_set_flags(&pr->ps_realit_to, realitexpire, pr,
	    KCLOCK_UPTIME, 0);
	timeout_set(&pr->ps_rucheck_to, rucheck, pr);
}
207
208
209 /*
210 * Allocate and initialize a new process.
211 */
/*
 * Allocate and initialize a new process, inheriting from `parent'.
 * `p' is the already-created initial thread for the new process;
 * `flags' is the FORK_* set passed to fork1().  Returns with the
 * process marked PS_EMBRYO and inserted on allprocess.
 */
struct process *
process_new(struct proc *p, struct process *parent, int flags)
{
	struct process *pr;

	pr = pool_get(&process_pool, PR_WAITOK);

	/*
	 * Make a process structure for the new process.
	 * Start by zeroing the section of the process that is
	 * zero-initialized, then copy the section that is copied
	 * directly from the parent (delimited by the ps_startzero/
	 * ps_endzero and ps_startcopy/ps_endcopy markers).
	 */
	memset(&pr->ps_startzero, 0,
	    (caddr_t)&pr->ps_endzero - (caddr_t)&pr->ps_startzero);
	memcpy(&pr->ps_startcopy, &parent->ps_startcopy,
	    (caddr_t)&pr->ps_endcopy - (caddr_t)&pr->ps_startcopy);

	process_initialize(pr, p);
	pr->ps_pid = allocpid();
	lim_fork(parent, pr);

	/* post-copy fixups */
	pr->ps_pptr = parent;
	pr->ps_ppid = parent->ps_pid;

	/* bump references to the text vnode (for sysctl) */
	pr->ps_textvp = parent->ps_textvp;
	if (pr->ps_textvp)
		vref(pr->ps_textvp);

	/* copy unveil if unveil is active */
	unveil_copy(parent, pr);

	/* inherit only the flags that are meaningful across fork */
	pr->ps_flags = parent->ps_flags &
	    (PS_SUGID | PS_SUGIDEXEC | PS_PLEDGE | PS_EXECPLEDGE |
	    PS_WXNEEDED | PS_CHROOT);
	/* keep the controlling terminal only if the session still has one */
	if (parent->ps_session->s_ttyvp != NULL)
		pr->ps_flags |= parent->ps_flags & PS_CONTROLT;

	/* duplicate the parent's pinsyscall tables, if any */
	if (parent->ps_pin.pn_pins) {
		pr->ps_pin.pn_pins = mallocarray(parent->ps_pin.pn_npins,
		    sizeof(u_int), M_PINSYSCALL, M_WAITOK);
		memcpy(pr->ps_pin.pn_pins, parent->ps_pin.pn_pins,
		    parent->ps_pin.pn_npins * sizeof(u_int));
		pr->ps_flags |= PS_PIN;
	}
	if (parent->ps_libcpin.pn_pins) {
		pr->ps_libcpin.pn_pins = mallocarray(parent->ps_libcpin.pn_npins,
		    sizeof(u_int), M_PINSYSCALL, M_WAITOK);
		memcpy(pr->ps_libcpin.pn_pins, parent->ps_libcpin.pn_pins,
		    parent->ps_libcpin.pn_npins * sizeof(u_int));
		pr->ps_flags |= PS_LIBCPIN;
	}

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if (flags & FORK_SHAREFILES)
		pr->ps_fd = fdshare(parent);
	else
		pr->ps_fd = fdcopy(parent);
	pr->ps_sigacts = sigactsinit(parent);
	if (flags & FORK_SHAREVM)
		pr->ps_vmspace = uvmspace_share(parent);
	else
		pr->ps_vmspace = uvmspace_fork(parent);

	if (parent->ps_flags & PS_PROFIL)
		startprofclock(pr);
	if (flags & FORK_PTRACE)
		pr->ps_flags |= parent->ps_flags & PS_TRACED;
	if (flags & FORK_NOZOMBIE)
		pr->ps_flags |= PS_NOZOMBIE;
	if (flags & FORK_SYSTEM)
		pr->ps_flags |= PS_SYSTEM;

	/* mark as embryo to protect against others */
	pr->ps_flags |= PS_EMBRYO;

	/*
	 * Force visibility of all of the above changes before the
	 * process becomes reachable via allprocess.
	 */
	membar_producer();

	/* it's sufficiently inited to be globally visible */
	LIST_INSERT_HEAD(&allprocess, pr, ps_list);

	return pr;
}
300
301 /* print the 'table full' message once per 10 seconds */
302 struct timeval fork_tfmrate = { 10, 0 };
303
304 int
fork_check_maxthread(uid_t uid)305 fork_check_maxthread(uid_t uid)
306 {
307 /*
308 * Although process entries are dynamically created, we still keep
309 * a global limit on the maximum number we will create. We reserve
310 * the last 5 processes to root. The variable nprocesses is the
311 * current number of processes, maxprocess is the limit. Similar
312 * rules for threads (struct proc): we reserve the last 5 to root;
313 * the variable nthreads is the current number of procs, maxthread is
314 * the limit.
315 */
316 if ((nthreads >= maxthread - 5 && uid != 0) || nthreads >= maxthread) {
317 static struct timeval lasttfm;
318
319 if (ratecheck(&lasttfm, &fork_tfmrate))
320 tablefull("thread");
321 return EAGAIN;
322 }
323 nthreads++;
324
325 return 0;
326 }
327
/*
 * Choose a CPU for the newly created thread and place it on that
 * CPU's run queue, under the scheduler lock.
 */
static inline void
fork_thread_start(struct proc *p, struct proc *parent, int flags)
{
	struct cpu_info *ci;

	SCHED_LOCK();
	ci = sched_choosecpu_fork(parent, flags);
	/* emit the trace event before the thread can start running */
	TRACEPOINT(sched, fork, p->p_tid + THREAD_PID_OFFSET,
	    p->p_p->ps_pid, CPU_INFO_UNIT(ci));
	setrunqueue(ci, p, p->p_usrpri);
	SCHED_UNLOCK();
}
340
/*
 * fork1: create a new process.
 *
 * `flags' selects the flavor (FORK_FORK, FORK_VFORK, FORK_PTRACE,
 * FORK_IDLE, FORK_SHAREVM, FORK_SHAREFILES, FORK_NOZOMBIE,
 * FORK_SYSTEM); `func' is the function the child runs first, with
 * argument `arg' (the new thread itself when arg is NULL); for
 * FORK_IDLE, `arg' is instead the cpu_info the idle proc belongs to.
 * On success the child's pid is stored in *retval and a pointer to
 * the new thread in *rnewprocp, when those are non-NULL.
 *
 * Returns 0 on success, EAGAIN or ENOMEM on resource exhaustion.
 */
int
fork1(struct proc *curp, int flags, void (*func)(void *), void *arg,
    register_t *retval, struct proc **rnewprocp)
{
	struct process *curpr = curp->p_p;
	struct process *pr;
	struct proc *p;
	uid_t uid = curp->p_ucred->cr_ruid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int error;
	struct ptrace_state *newptstat = NULL;

	KASSERT((flags & ~(FORK_FORK | FORK_VFORK | FORK_PPWAIT | FORK_PTRACE
	    | FORK_IDLE | FORK_SHAREVM | FORK_SHAREFILES | FORK_NOZOMBIE
	    | FORK_SYSTEM)) == 0);
	KASSERT(func != NULL);

	/* reserve a thread slot first; it must be released on any failure */
	if ((error = fork_check_maxthread(uid)))
		return error;

	/*
	 * Enforce the global process limit the same way as the thread
	 * limit: the last 5 process slots are reserved for root.
	 */
	if ((nprocesses >= maxprocess - 5 && uid != 0) ||
	    nprocesses >= maxprocess) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("process");
		nthreads--;
		return EAGAIN;
	}
	nprocesses++;

	/*
	 * Increment the count of processes running with this uid.
	 * Don't allow a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > lim_cur(RLIMIT_NPROC)) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return EAGAIN;
	}

	/* allocate the user area (kernel stack/pcb) for the new thread */
	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		(void)chgproccnt(uid, -1);
		nprocesses--;
		nthreads--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	pr = process_new(p, curpr, flags);

	/* cache per-thread copies of the process's fd table and vmspace */
	p->p_fd		= pr->ps_fd;
	p->p_vmspace	= pr->ps_vmspace;
	if (pr->ps_flags & PS_SYSTEM)
		atomic_setbits_int(&p->p_flag, P_SYSTEM);

	/* vfork semantics: parent waits until child execs or exits */
	if (flags & FORK_PPWAIT) {
		atomic_setbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_setbits_int(&curpr->ps_flags, PS_ISPWAIT);
	}

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (curpr->ps_traceflag & KTRFAC_INHERIT)
		ktrsettrace(pr, curpr->ps_traceflag, curpr->ps_tracevp,
		    curpr->ps_tracecred);
#endif

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  If
	 * this is a normal user fork, the child will exit directly
	 * to user mode via child_return() on its first time slice
	 * and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_fork(curp, p, NULL, NULL, func, arg ? arg : p);

	vm = pr->ps_vmspace;

	/* update fork statistics by flavor */
	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
	}

	/* allocate ptrace state before taking any lists/locks below */
	if (pr->ps_flags & PS_TRACED && flags & FORK_FORK)
		newptstat = malloc(sizeof(*newptstat), M_SUBPROC, M_WAITOK);

	p->p_tid = alloctid();

	/* make thread and process findable via the global lists/hashes */
	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
	LIST_INSERT_HEAD(PIDHASH(pr->ps_pid), pr, ps_hash);
	LIST_INSERT_AFTER(curpr, pr, ps_pglist);
	LIST_INSERT_HEAD(&curpr->ps_children, pr, ps_sibling);

	if (pr->ps_flags & PS_TRACED) {
		/* a traced child is reparented to the tracing parent */
		pr->ps_oppid = curpr->ps_pid;
		process_reparent(pr, curpr->ps_pptr);

		/*
		 * Set ptrace status.
		 */
		if (newptstat != NULL) {
			pr->ps_ptstat = newptstat;
			newptstat = NULL;
			curpr->ps_ptstat->pe_report_event = PTRACE_FORK;
			pr->ps_ptstat->pe_report_event = PTRACE_FORK;
			curpr->ps_ptstat->pe_other_pid = pr->ps_pid;
			pr->ps_ptstat->pe_other_pid = curpr->ps_pid;
		}
	}

	/*
	 * For new processes, set accounting bits and mark as complete.
	 */
	nanouptime(&pr->ps_start);
	pr->ps_acflag = AFORK;
	atomic_clearbits_int(&pr->ps_flags, PS_EMBRYO);

	if ((flags & FORK_IDLE) == 0)
		fork_thread_start(p, curp, flags);
	else
		p->p_cpu = arg;	/* idle procs are bound to their cpu_info */

	/* still set if the PS_TRACED branch above consumed nothing */
	free(newptstat, M_SUBPROC, sizeof(*newptstat));

	/*
	 * Notify any interested parties about the new process.
	 */
	knote_locked(&curpr->ps_klist, NOTE_FORK | pr->ps_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set PS_PPWAIT on child and PS_ISPWAIT
	 * on ourselves, and sleep on our process for the latter flag
	 * to go away.
	 * XXX Need to stop other rthreads in the parent
	 */
	if (flags & FORK_PPWAIT)
		while (curpr->ps_flags & PS_ISPWAIT)
			tsleep_nsec(curpr, PWAIT, "ppwait", INFSLP);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (curpr->ps_flags & PS_TRACED))
		psignal(curp, SIGTRAP);

	/*
	 * Return child pid to parent process
	 */
	if (retval != NULL)
		*retval = pr->ps_pid;
	return (0);
}
528
/*
 * thread_fork: create a new thread in the current process (the
 * back end of __tfork(2)).  The new thread starts in child_return()
 * with its stack at `stack' and TCB at `tcb'; its thread ID is
 * returned in *retval and, if `tidptr' is non-NULL, also copied out
 * to userspace (failure to copy out raises SIGSEGV rather than
 * failing the call, since the thread already exists).
 *
 * Returns 0 on success, EINVAL or EAGAIN/ENOMEM on failure.
 */
int
thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
    register_t *retval)
{
	struct process *pr = curp->p_p;
	struct proc *p;
	pid_t tid;
	vaddr_t uaddr;
	int error;

	if (stack == NULL)
		return EINVAL;

	/* reserve a thread slot; released below on any later failure */
	if ((error = fork_check_maxthread(curp->p_ucred->cr_ruid)))
		return error;

	uaddr = uvm_uarea_alloc();
	if (uaddr == 0) {
		nthreads--;
		return ENOMEM;
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */
	p = thread_new(curp, uaddr);
	atomic_setbits_int(&p->p_flag, P_THREAD);
	sigstkinit(&p->p_sigstk);	/* new thread gets a fresh sigaltstack */
	memset(p->p_name, 0, sizeof p->p_name);

	/* other links */
	p->p_p = pr;

	/* local copies */
	p->p_fd		= pr->ps_fd;
	p->p_vmspace	= pr->ps_vmspace;

	/*
	 * Finish creating the child thread.  cpu_fork() will copy
	 * and update the pcb and make the child ready to run.  The
	 * child will exit directly to user mode via child_return()
	 * on its first time slice and will not return here.
	 */
	cpu_fork(curp, p, stack, tcb, child_return, p);

	p->p_tid = alloctid();

	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);

	/* link into the process's thread list under ps_mtx */
	mtx_enter(&pr->ps_mtx);
	TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
	pr->ps_threadcnt++;

	/*
	 * if somebody else wants to take us to single threaded mode,
	 * count ourselves in.
	 */
	if (pr->ps_single) {
		pr->ps_singlecnt++;
		atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
	}
	mtx_leave(&pr->ps_mtx);

	/*
	 * Return tid to parent thread and copy it out to userspace
	 */
	*retval = tid = p->p_tid + THREAD_PID_OFFSET;
	if (tidptr != NULL) {
		if (copyout(&tid, tidptr, sizeof(tid)))
			psignal(curp, SIGSEGV);
	}

	fork_thread_start(p, curp, 0);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	forkstat.cnttfork++;
	uvmexp.forks++;
	uvmexp.forks_sharevm++;

	return 0;
}
613
614
615 /* Find an unused tid */
616 pid_t
alloctid(void)617 alloctid(void)
618 {
619 pid_t tid;
620
621 do {
622 /* (0 .. TID_MASK+1] */
623 tid = 1 + (arc4random() & TID_MASK);
624 } while (tfind(tid) != NULL);
625
626 return (tid);
627 }
628
629 /*
630 * Checks for current use of a pid, either as a pid or pgid.
631 */
632 pid_t oldpids[128];
633 int
ispidtaken(pid_t pid)634 ispidtaken(pid_t pid)
635 {
636 uint32_t i;
637
638 for (i = 0; i < nitems(oldpids); i++)
639 if (pid == oldpids[i])
640 return (1);
641
642 if (prfind(pid) != NULL)
643 return (1);
644 if (pgfind(pid) != NULL)
645 return (1);
646 if (zombiefind(pid) != NULL)
647 return (1);
648 return (0);
649 }
650
651 /* Find an unused pid */
652 pid_t
allocpid(void)653 allocpid(void)
654 {
655 static int first = 1;
656 pid_t pid;
657
658 /* The first PID allocated is always 1. */
659 if (first) {
660 first = 0;
661 return 1;
662 }
663
664 /*
665 * All subsequent PIDs are chosen randomly. We need to
666 * find an unused PID in the range [2, PID_MAX].
667 */
668 do {
669 pid = 2 + arc4random_uniform(PID_MAX - 1);
670 } while (ispidtaken(pid));
671 return pid;
672 }
673
674 void
freepid(pid_t pid)675 freepid(pid_t pid)
676 {
677 static uint32_t idx;
678
679 oldpids[idx++ % nitems(oldpids)] = pid;
680 }
681
/*
 * Do machine independent parts of switching to a new process.
 * Entered with the scheduler lock held (asserted below); releases it,
 * drops the spl, verifies no locks remain, starts any per-thread
 * clock interrupts, and finally takes the kernel lock.
 */
void
proc_trampoline_mi(void)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct proc *p = curproc;

	SCHED_ASSERT_LOCKED();
	clear_resched(curcpu());
	mtx_leave(&sched_lock);
	spl0();

	/* sanity: nothing held, safe to sleep from here on */
	SCHED_ASSERT_UNLOCKED();
	KERNEL_ASSERT_UNLOCKED();
	assertwaitok();
	smr_idle();

	/* Start any optional clock interrupts needed by the thread. */
	if (ISSET(p->p_p->ps_flags, PS_ITIMER)) {
		atomic_setbits_int(&spc->spc_schedflags, SPCF_ITIMER);
		clockintr_advance(&spc->spc_itimer, hardclock_period);
	}
	if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
		atomic_setbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
		clockintr_advance(&spc->spc_profclock, profclock_period);
	}

	/* stamp the start of this thread's first run interval */
	nanouptime(&spc->spc_runtime);
	KERNEL_LOCK();
}
712