xref: /dragonfly/sys/kern/kern_proc.c (revision 7d3e9a5b)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/sysctl.h>
34 #include <sys/malloc.h>
35 #include <sys/proc.h>
36 #include <sys/vnode.h>
37 #include <sys/jail.h>
38 #include <sys/filedesc.h>
39 #include <sys/tty.h>
40 #include <sys/dsched.h>
41 #include <sys/signalvar.h>
42 #include <sys/spinlock.h>
43 #include <sys/random.h>
44 #include <sys/exec.h>
45 #include <vm/vm.h>
46 #include <sys/lock.h>
47 #include <sys/kinfo.h>
48 #include <vm/pmap.h>
49 #include <vm/vm_map.h>
50 #include <machine/smp.h>
51 
52 #include <sys/refcount.h>
53 #include <sys/spinlock2.h>
54 
55 /*
56  * Hash table size must be a power of two and is not currently dynamically
57  * sized.  There is a trade-off between the linear scans which must iterate
58  * all HSIZE elements and the number of elements which might accumulate
59  * within each hash chain.
60  */
61 #define ALLPROC_HSIZE	256
62 #define ALLPROC_HMASK	(ALLPROC_HSIZE - 1)
63 #define ALLPROC_HASH(pid)	(pid & ALLPROC_HMASK)
64 #define PGRP_HASH(pid)	(pid & ALLPROC_HMASK)
65 #define SESS_HASH(pid)	(pid & ALLPROC_HMASK)
66 
67 /*
68  * pid_doms[] management, used to control how quickly a PID can be recycled.
69  * Must be a multiple of ALLPROC_HSIZE for the proc_makepid() inner loops.
70  *
71  * WARNING! PIDDOM_DELAY should not be defined > 20 or so unless you change
72  *	    the array from int8_t's to int16_t's.
73  */
74 #define PIDDOM_COUNT	10	/* 10 pids per domain - reduce array size */
75 #define PIDDOM_DELAY	10	/* min 10 seconds after exit before reuse */
76 #define PIDDOM_SCALE	10	/* (10,000*SCALE)/sec performance guarantee */
77 #define PIDSEL_DOMAINS	rounddown(PID_MAX * PIDDOM_SCALE / PIDDOM_COUNT, ALLPROC_HSIZE)
78 
79 /* Used by libkvm */
80 int allproc_hsize = ALLPROC_HSIZE;
81 
82 LIST_HEAD(pidhashhead, proc);
83 
84 static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
85 MALLOC_DEFINE(M_SESSION, "session", "session header");
86 MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
87 MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
88 MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
89 MALLOC_DEFINE(M_UPMAP, "upmap", "upmap/kpmap/lpmap structures");
90 
91 int ps_showallprocs = 1;
92 static int ps_showallthreads = 1;
93 SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
94     &ps_showallprocs, 0,
95     "Unprivileged processes can see processes with different UID/GID");
96 SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
97     &ps_showallthreads, 0,
98     "Unprivileged processes can see kernel threads");
99 static u_int pid_domain_skips;
100 SYSCTL_UINT(_kern, OID_AUTO, pid_domain_skips, CTLFLAG_RW,
101     &pid_domain_skips, 0,
102     "Number of pid_doms[] skipped");
103 static u_int pid_inner_skips;
104 SYSCTL_UINT(_kern, OID_AUTO, pid_inner_skips, CTLFLAG_RW,
105     &pid_inner_skips, 0,
106     "Number of pid_doms[] skipped");
107 
108 static void orphanpg(struct pgrp *pg);
109 static void proc_makepid(struct proc *p, int random_offset);
110 
111 /*
112  * Process related lists (for proc_token, allproc, allpgrp, and allsess)
113  */
114 typedef struct procglob procglob_t;
115 
116 static procglob_t	procglob[ALLPROC_HSIZE];
117 
118 /*
119  * We try our best to avoid recycling a PID too quickly.  We do this by
120  * storing (uint8_t)time_second in the related pid domain on-reap and then
121  * using that to skip-over the domain on-allocate.
122  *
123  * This array has to be fairly large to support a high fork/exec rate.
124  * A ~100,000 entry array will support a 10-second reuse latency at
125  * 10,000 execs/second, worst case.  Best-case multiply by PIDDOM_COUNT
126  * (approximately 100,000 execs/second).
127  *
128  * Currently we allocate around a megabyte, making the worst-case fork
129  * rate around 100,000/second.
130  */
131 static uint8_t *pid_doms;
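
/*
 * Rough sizing (illustrative, assuming PID_MAX is 999999):
 *
 *	PIDSEL_DOMAINS = rounddown(999999 * PIDDOM_SCALE / PIDDOM_COUNT, 256)
 *		       = 999936 one-byte entries (~1MB)
 *
 * which gives a worst-case sustainable allocation rate of roughly
 * PIDSEL_DOMAINS / PIDDOM_DELAY ~= 100,000 pids/second before new
 * allocations start skipping recently-reaped domains.
 */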
132 
133 /*
134  * Random component to nextpid generation.  We mix in a random factor to make
135  * it a little harder to predict.  We sanity check the modulus value to avoid
136  * doing it in critical paths.  Don't let it be too small or we pointlessly
137  * waste entropy, and don't let it be impossibly large.  Using a
138  * modulus that is too big causes a LOT more process table scans and slows
139  * down fork processing as the pidchecked caching is defeated.
140  */
141 static int randompid = 0;
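
/*
 * For example, setting kern.randompid to 1000 causes proc_add_allproc()
 * to add a random offset in the range [0, 999] to each new pid base,
 * while leaving pid selection purely sequential when the sysctl is 0.
 */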
142 
143 static __inline
144 struct ucred *
145 pcredcache(struct ucred *cr, struct proc *p)
146 {
147 	if (cr != p->p_ucred) {
148 		if (cr)
149 			crfree(cr);
150 		spin_lock(&p->p_spin);
151 		if ((cr = p->p_ucred) != NULL)
152 			crhold(cr);
153 		spin_unlock(&p->p_spin);
154 	}
155 	return cr;
156 }
157 
158 /*
159  * No requirements.
160  */
161 static int
162 sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
163 {
164 	int error, pid;
165 
166 	pid = randompid;
167 	error = sysctl_handle_int(oidp, &pid, 0, req);
168 	if (error || !req->newptr)
169 		return (error);
170 	if (pid < 0 || pid > PID_MAX - 100)     /* out of range */
171 		pid = PID_MAX - 100;
172 	else if (pid < 2)                       /* NOP */
173 		pid = 0;
174 	else if (pid < 100)                     /* Make it reasonable */
175 		pid = 100;
176 	randompid = pid;
177 	return (error);
178 }
179 
180 SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
181 	    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
182 
183 /*
184  * Initialize global process hashing structures.
185  *
186  * These functions are ONLY called from the low level boot code and do
187  * not lock their operations.
188  */
189 void
190 procinit(void)
191 {
192 	u_long i;
193 
194 	/*
195 	 * Allocate dynamically.  This array can be large (~1MB) so don't
196 	 * waste boot loader space.
197 	 */
198 	pid_doms = kmalloc(sizeof(pid_doms[0]) * PIDSEL_DOMAINS,
199 			   M_PROC, M_WAITOK | M_ZERO);
200 
201 	/*
202 	 * Avoid unnecessary stalls due to pid_doms[] values all being
203 	 * the same.  Make sure that the allocation of pid 1 and pid 2
204 	 * succeeds.
205 	 */
206 	for (i = 0; i < PIDSEL_DOMAINS; ++i)
207 		pid_doms[i] = (int8_t)i - (int8_t)(PIDDOM_DELAY + 1);
208 
209 	/*
210 	 * Other misc init.
211 	 */
212 	for (i = 0; i < ALLPROC_HSIZE; ++i) {
213 		procglob_t *prg = &procglob[i];
214 		LIST_INIT(&prg->allproc);
215 		LIST_INIT(&prg->allsess);
216 		LIST_INIT(&prg->allpgrp);
217 		lwkt_token_init(&prg->proc_token, "allproc");
218 	}
219 	uihashinit();
220 }
221 
222 void
223 procinsertinit(struct proc *p)
224 {
225 	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(p->p_pid)].allproc,
226 			 p, p_list);
227 }
228 
229 void
230 pgrpinsertinit(struct pgrp *pg)
231 {
232 	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(pg->pg_id)].allpgrp,
233 			 pg, pg_list);
234 }
235 
236 void
237 sessinsertinit(struct session *sess)
238 {
239 	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(sess->s_sid)].allsess,
240 			 sess, s_list);
241 }
242 
243 /*
244  * Process hold/release support functions.  Called via the PHOLD(),
245  * PRELE(), and PSTALL() macros.
246  *
247  * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
248  * is issued unless someone is actually waiting for the process.
249  *
250  * Most holds are short-term, allowing a process scan or other similar
251  * operation to access a proc structure without it getting ripped out from
252  * under us.  procfs and process-list sysctl ops also use the hold function
253  * interlocked with various p_flags to keep the vmspace intact when reading
254  * or writing a user process's address space.
255  *
256  * There are two situations where a hold count can be longer.  Exiting lwps
257  * hold the process until the lwp is reaped, and the parent will hold the
258  * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
259  *
260  * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
261  * various critical points in the fork/exec and exit paths before proceeding.
262  */
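/*
 * Typical short-term hold pattern (illustrative sketch only):
 *
 *	p = pfind(pid);			returns (p) held, or NULL
 *	if (p) {
 *		... access (p) safely ...
 *		PRELE(p);		(p) may be destroyed after this
 *	}
 *
 * Code that already has a stable pointer simply brackets its access
 * with PHOLD(p) / PRELE(p).
 */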
263 #define PLOCK_WAITING	0x40000000	/* tsleep() on p_lock */
264 #define PLOCK_ZOMB	0x20000000	/* zombie interlock held */
265 #define PLOCK_WAITRES	0x10000000	/* wait reservation held */
266 #define PLOCK_MASK	0x0FFFFFFF
267 
268 /*
269  * Returns non-zero if the WAITRES flag has been set
270  */
271 int
272 pwaitres_pending(struct proc *p)
273 {
274 	if (p->p_lock & PLOCK_WAITRES)
275 		return 1;
276 	return 0;
277 }
278 
279 /*
280  * Caller holds PLOCK_ZOMB.  Sets PLOCK_WAITRES and wakes up anyone in
281  * pholdzomb() (which will fail).
282  */
283 void
284 pwaitres_set(struct proc *p)
285 {
286 	int o;
287 
288 	KKASSERT((p->p_lock & (PLOCK_ZOMB | PLOCK_WAITRES)) == PLOCK_ZOMB);
289 	o = p->p_lock;
290 	cpu_ccfence();
291 	for (;;) {
292 		if (atomic_fcmpset_int(&p->p_lock, &o,
293 				       (o | PLOCK_WAITRES) & ~PLOCK_WAITING)) {
294 			if (o & PLOCK_WAITING)
295 				wakeup(&p->p_lock);
296 			return;
297 		}
298 	}
299 }
300 
301 void
302 pstall(struct proc *p, const char *wmesg, int count)
303 {
304 	int o;
305 	int n;
306 
307 	for (;;) {
308 		o = p->p_lock;
309 		cpu_ccfence();
310 		if ((o & PLOCK_MASK) <= count)
311 			break;
312 		n = o | PLOCK_WAITING;
313 		tsleep_interlock(&p->p_lock, 0);
314 
315 		/*
316 		 * If someone is trying to single-step the process during
317 		 * an exec or an exit they can deadlock us because procfs
318 		 * sleeps with the process held.
319 		 */
320 		if (p->p_stops) {
321 			if (p->p_flags & P_INEXEC) {
322 				wakeup(&p->p_stype);
323 			} else if (p->p_flags & P_POSTEXIT) {
324 				spin_lock(&p->p_spin);
325 				p->p_stops = 0;
326 				p->p_step = 0;
327 				spin_unlock(&p->p_spin);
328 				wakeup(&p->p_stype);
329 			}
330 		}
331 
332 		if (atomic_cmpset_int(&p->p_lock, o, n)) {
333 			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
334 		}
335 	}
336 }
337 
338 void
339 phold(struct proc *p)
340 {
341 	atomic_add_int(&p->p_lock, 1);
342 }
343 
344 /*
345  * WARNING!  On last release (p) can become instantly invalid due to
346  *	     MP races.
347  */
348 void
349 prele(struct proc *p)
350 {
351 	int o;
352 	int n;
353 
354 	/*
355 	 * Fast path
356 	 */
357 	if (atomic_cmpset_int(&p->p_lock, 1, 0))
358 		return;
359 
360 	/*
361 	 * Slow path
362 	 */
363 	for (;;) {
364 		o = p->p_lock;
365 		KKASSERT((o & PLOCK_MASK) > 0);
366 		cpu_ccfence();
367 		n = (o - 1) & ~PLOCK_WAITING;
368 		if (atomic_cmpset_int(&p->p_lock, o, n)) {
369 			if (o & PLOCK_WAITING)
370 				wakeup(&p->p_lock);
371 			break;
372 		}
373 	}
374 }
375 
376 /*
377  * Hold and flag serialized for zombie reaping purposes.  Fail if we had
378  * to sleep or if another thread has reserved the reap (WAITRES).
379  *
380  * This function will fail if it has to block, returning non-zero with
381  * neither the flag set nor the hold count bumped.  Note that (p) may
382  * not be valid in this case if the caller does not have some other
383  * reference on (p).
384  *
385  * This function does not block on other PHOLD()s, only on other
386  * PHOLDZOMB()s.
387  *
388  * Zero is returned on success.  The hold count will be incremented and
389  * the serialization flag acquired.  Note that serialization is only against
390  * other pholdzomb() calls, not against phold() calls.
391  */
392 int
393 pholdzomb(struct proc *p)
394 {
395 	int o;
396 	int n;
397 
398 	/*
399 	 * Fast path
400 	 */
401 	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
402 		return(0);
403 
404 	/*
405 	 * Slow path
406 	 */
407 	for (;;) {
408 		o = p->p_lock;
409 		cpu_ccfence();
410 		if ((o & (PLOCK_ZOMB | PLOCK_WAITRES)) == 0) {
411 			n = (o + 1) | PLOCK_ZOMB;
412 			if (atomic_cmpset_int(&p->p_lock, o, n))
413 				return(0);
414 		} else if (o & PLOCK_WAITRES) {
415 			return(1);
416 		} else {
417 			KKASSERT((o & PLOCK_MASK) > 0);
418 			n = o | PLOCK_WAITING;
419 			tsleep_interlock(&p->p_lock, 0);
420 			if (atomic_cmpset_int(&p->p_lock, o, n)) {
421 				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
422 				/* (p) can be ripped out at this point */
423 				return(1);
424 			}
425 		}
426 	}
427 }
428 
429 /*
430  * Release PLOCK_ZOMB, PLOCK_WAITRES, and the hold count, waking up any
431  * waiters.
432  *
433  * WARNING!  On last release (p) can become instantly invalid due to
434  *	     MP races.
435  */
436 void
437 prelezomb(struct proc *p)
438 {
439 	int o;
440 	int n;
441 
442 	/*
443 	 * Fast path
444 	 */
445 	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
446 		return;
447 
448 	/*
449 	 * Slow path
450 	 */
451 	KKASSERT(p->p_lock & PLOCK_ZOMB);
452 	for (;;) {
453 		o = p->p_lock;
454 		KKASSERT((o & PLOCK_MASK) > 0);
455 		cpu_ccfence();
456 		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING | PLOCK_WAITRES);
457 		if (atomic_cmpset_int(&p->p_lock, o, n)) {
458 			if (o & PLOCK_WAITING)
459 				wakeup(&p->p_lock);
460 			break;
461 		}
462 	}
463 }
464 
465 /*
466  * Is p an inferior of the current process?
467  *
468  * No requirements.
469  */
470 int
471 inferior(struct proc *p)
472 {
473 	struct proc *p2;
474 
475 	PHOLD(p);
476 	lwkt_gettoken_shared(&p->p_token);
477 	while (p != curproc) {
478 		if (p->p_pid == 0) {
479 			lwkt_reltoken(&p->p_token);
480 			PRELE(p);
481 			return (0);
482 		}
482 		p2 = p->p_pptr;
483 		PHOLD(p2);
484 		lwkt_reltoken(&p->p_token);
485 		PRELE(p);
486 		lwkt_gettoken_shared(&p2->p_token);
487 		p = p2;
488 	}
489 	lwkt_reltoken(&p->p_token);
490 	PRELE(p);
491 
492 	return (1);
493 }
494 
495 /*
496  * Locate a process by number.  The returned process will be referenced and
497  * must be released with PRELE().
498  *
499  * No requirements.
500  */
501 struct proc *
502 pfind(pid_t pid)
503 {
504 	struct proc *p = curproc;
505 	procglob_t *prg;
506 	int n;
507 
508 	/*
509 	 * Shortcut the current process
510 	 */
511 	if (p && p->p_pid == pid) {
512 		PHOLD(p);
513 		return (p);
514 	}
515 
516 	/*
517 	 * Otherwise find it in the hash table.
518 	 */
519 	n = ALLPROC_HASH(pid);
520 	prg = &procglob[n];
521 
522 	lwkt_gettoken_shared(&prg->proc_token);
523 	LIST_FOREACH(p, &prg->allproc, p_list) {
524 		if (p->p_stat == SZOMB)
525 			continue;
526 		if (p->p_pid == pid) {
527 			PHOLD(p);
528 			lwkt_reltoken(&prg->proc_token);
529 			return (p);
530 		}
531 	}
532 	lwkt_reltoken(&prg->proc_token);
533 
534 	return (NULL);
535 }
536 
537 /*
538  * Locate a process by number.  The returned process is NOT referenced.
539  * The result will not be stable and is typically only used to validate
540  * against a process that the caller has in-hand.
541  *
542  * No requirements.
543  */
544 struct proc *
545 pfindn(pid_t pid)
546 {
547 	struct proc *p = curproc;
548 	procglob_t *prg;
549 	int n;
550 
551 	/*
552 	 * Shortcut the current process
553 	 */
554 	if (p && p->p_pid == pid)
555 		return (p);
556 
557 	/*
558 	 * Otherwise find it in the hash table.
559 	 */
560 	n = ALLPROC_HASH(pid);
561 	prg = &procglob[n];
562 
563 	lwkt_gettoken_shared(&prg->proc_token);
564 	LIST_FOREACH(p, &prg->allproc, p_list) {
565 		if (p->p_stat == SZOMB)
566 			continue;
567 		if (p->p_pid == pid) {
568 			lwkt_reltoken(&prg->proc_token);
569 			return (p);
570 		}
571 	}
572 	lwkt_reltoken(&prg->proc_token);
573 
574 	return (NULL);
575 }
576 
577 /*
578  * Locate a zombie process by number.  Return a process or NULL.
579  * The returned process will be referenced and the caller must release
580  * it with PRELE().
581  *
582  * No other requirements.
583  */
584 struct proc *
585 zpfind(pid_t pid)
586 {
587 	struct proc *p = curproc;
588 	procglob_t *prg;
589 	int n;
590 
591 	/*
592 	 * Shortcut the current process
593 	 */
594 	if (p && p->p_pid == pid) {
595 		PHOLD(p);
596 		return (p);
597 	}
598 
599 	/*
600 	 * Otherwise find it in the hash table.
601 	 */
602 	n = ALLPROC_HASH(pid);
603 	prg = &procglob[n];
604 
605 	lwkt_gettoken_shared(&prg->proc_token);
606 	LIST_FOREACH(p, &prg->allproc, p_list) {
607 		if (p->p_stat != SZOMB)
608 			continue;
609 		if (p->p_pid == pid) {
610 			PHOLD(p);
611 			lwkt_reltoken(&prg->proc_token);
612 			return (p);
613 		}
614 	}
615 	lwkt_reltoken(&prg->proc_token);
616 
617 	return (NULL);
618 }
619 
620 /*
621  * Caller must hold the process token shared or exclusive.
622  * The returned lwp, if not NULL, will be held.  Caller must
623  * LWPRELE() it when done.
624  */
625 struct lwp *
626 lwpfind(struct proc *p, lwpid_t tid)
627 {
628 	struct lwp *lp;
629 
630 	lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, tid);
631 	if (lp)
632 		LWPHOLD(lp);
633 	return lp;
634 }
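
/*
 * Illustrative lwpfind() usage (sketch only):
 *
 *	lwkt_gettoken_shared(&p->p_token);
 *	lp = lwpfind(p, tid);
 *	if (lp) {
 *		... use (lp) ...
 *		LWPRELE(lp);
 *	}
 *	lwkt_reltoken(&p->p_token);
 */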
635 
636 void
637 pgref(struct pgrp *pgrp)
638 {
639 	refcount_acquire(&pgrp->pg_refs);
640 }
641 
642 void
643 pgrel(struct pgrp *pgrp)
644 {
645 	procglob_t *prg;
646 	int count;
647 	int n;
648 
649 	n = PGRP_HASH(pgrp->pg_id);
650 	prg = &procglob[n];
651 
652 	for (;;) {
653 		count = pgrp->pg_refs;
654 		cpu_ccfence();
655 		KKASSERT(count > 0);
656 		if (count == 1) {
657 			lwkt_gettoken(&prg->proc_token);
658 			if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
659 				break;
660 			lwkt_reltoken(&prg->proc_token);
661 			/* retry */
662 		} else {
663 			if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
664 				return;
665 			/* retry */
666 		}
667 	}
668 
669 	/*
670 	 * Successful 1->0 transition, prg->proc_token is held.
671 	 */
672 	LIST_REMOVE(pgrp, pg_list);
673 	if (pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] != (uint8_t)time_second)
674 		pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] = (uint8_t)time_second;
675 
676 	/*
677 	 * Reset any sigio structures pointing to us as a result of
678 	 * F_SETOWN with our pgid.
679 	 */
680 	funsetownlst(&pgrp->pg_sigiolst);
681 
682 	if (pgrp->pg_session->s_ttyp != NULL &&
683 	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
684 		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
685 	}
686 	lwkt_reltoken(&prg->proc_token);
687 
688 	sess_rele(pgrp->pg_session);
689 	kfree(pgrp, M_PGRP);
690 }
691 
692 /*
693  * Locate a process group by number.  The returned process group will be
694  * referenced w/pgref() and must be released with pgrel() (or assigned
695  * somewhere if you wish to keep the reference).
696  *
697  * No requirements.
698  */
699 struct pgrp *
700 pgfind(pid_t pgid)
701 {
702 	struct pgrp *pgrp;
703 	procglob_t *prg;
704 	int n;
705 
706 	n = PGRP_HASH(pgid);
707 	prg = &procglob[n];
708 	lwkt_gettoken_shared(&prg->proc_token);
709 
710 	LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
711 		if (pgrp->pg_id == pgid) {
712 			refcount_acquire(&pgrp->pg_refs);
713 			lwkt_reltoken(&prg->proc_token);
714 			return (pgrp);
715 		}
716 	}
717 	lwkt_reltoken(&prg->proc_token);
718 	return (NULL);
719 }
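
/*
 * Illustrative pgfind() usage (sketch only):
 *
 *	pgrp = pgfind(pgid);
 *	if (pgrp) {
 *		... use (pgrp) ...
 *		pgrel(pgrp);	last release also destroys the pgrp
 *	}
 */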
720 
721 /*
722  * Move p to a new or existing process group (and session)
723  *
724  * No requirements.
725  */
726 int
727 enterpgrp(struct proc *p, pid_t pgid, int mksess)
728 {
729 	struct pgrp *pgrp;
730 	struct pgrp *opgrp;
731 	int error;
732 
733 	pgrp = pgfind(pgid);
734 
735 	KASSERT(pgrp == NULL || !mksess,
736 		("enterpgrp: setsid into non-empty pgrp"));
737 	KASSERT(!SESS_LEADER(p),
738 		("enterpgrp: session leader attempted setpgrp"));
739 
740 	if (pgrp == NULL) {
741 		pid_t savepid = p->p_pid;
742 		struct proc *np;
743 		procglob_t *prg;
744 		int n;
745 
746 		/*
747 		 * new process group
748 		 */
749 		KASSERT(p->p_pid == pgid,
750 			("enterpgrp: new pgrp and pid != pgid"));
751 		pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
752 		pgrp->pg_id = pgid;
753 		LIST_INIT(&pgrp->pg_members);
754 		pgrp->pg_jobc = 0;
755 		SLIST_INIT(&pgrp->pg_sigiolst);
756 		lwkt_token_init(&pgrp->pg_token, "pgrp_token");
757 		refcount_init(&pgrp->pg_refs, 1);
758 		lockinit(&pgrp->pg_lock, "pgwt", 0, 0);
759 
760 		n = PGRP_HASH(pgid);
761 		prg = &procglob[n];
762 
763 		lwkt_gettoken(&prg->proc_token);
764 		if ((np = pfindn(savepid)) == NULL || np != p) {
765 			lwkt_reltoken(&prg->proc_token);
766 			error = ESRCH;
767 			kfree(pgrp, M_PGRP);
768 			goto fatal;
769 		}
770 
771 		if (mksess) {
772 			struct session *sess;
773 
774 			/*
775 			 * new session
776 			 */
777 			sess = kmalloc(sizeof(struct session), M_SESSION,
778 				       M_WAITOK | M_ZERO);
779 			lwkt_gettoken(&p->p_token);
780 			sess->s_prg = prg;
781 			sess->s_leader = p;
782 			sess->s_sid = p->p_pid;
783 			sess->s_count = 1;
784 			sess->s_ttyvp = NULL;
785 			sess->s_ttyp = NULL;
786 			bcopy(p->p_session->s_login, sess->s_login,
787 			      sizeof(sess->s_login));
788 			pgrp->pg_session = sess;
789 			KASSERT(p == curproc,
790 				("enterpgrp: mksession and p != curproc"));
791 			p->p_flags &= ~P_CONTROLT;
792 			LIST_INSERT_HEAD(&prg->allsess, sess, s_list);
793 			lwkt_reltoken(&p->p_token);
794 		} else {
795 			lwkt_gettoken(&p->p_token);
796 			pgrp->pg_session = p->p_session;
797 			sess_hold(pgrp->pg_session);
798 			lwkt_reltoken(&p->p_token);
799 		}
800 		LIST_INSERT_HEAD(&prg->allpgrp, pgrp, pg_list);
801 
802 		lwkt_reltoken(&prg->proc_token);
803 	} else if (pgrp == p->p_pgrp) {
804 		pgrel(pgrp);
805 		goto done;
806 	} /* else pgfind() referenced the pgrp */
807 
808 	lwkt_gettoken(&pgrp->pg_token);
809 	lwkt_gettoken(&p->p_token);
810 
811 	/*
812 	 * Replace p->p_pgrp, handling any races that occur.
813 	 */
814 	while ((opgrp = p->p_pgrp) != NULL) {
815 		pgref(opgrp);
816 		lwkt_gettoken(&opgrp->pg_token);
817 		if (opgrp != p->p_pgrp) {
818 			lwkt_reltoken(&opgrp->pg_token);
819 			pgrel(opgrp);
820 			continue;
821 		}
822 		LIST_REMOVE(p, p_pglist);
823 		break;
824 	}
825 	p->p_pgrp = pgrp;
826 	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
827 
828 	/*
829 	 * Adjust eligibility of affected pgrps to participate in job control.
830 	 * Increment eligibility counts before decrementing, otherwise we
831 	 * could reach 0 spuriously during the first call.
832 	 */
833 	fixjobc(p, pgrp, 1);
834 	if (opgrp) {
835 		fixjobc(p, opgrp, 0);
836 		lwkt_reltoken(&opgrp->pg_token);
837 		pgrel(opgrp);	/* manual pgref */
838 		pgrel(opgrp);	/* p->p_pgrp ref */
839 	}
840 	lwkt_reltoken(&p->p_token);
841 	lwkt_reltoken(&pgrp->pg_token);
842 done:
843 	error = 0;
844 fatal:
845 	return (error);
846 }
847 
848 /*
849  * Remove process from process group
850  *
851  * No requirements.
852  */
853 int
854 leavepgrp(struct proc *p)
855 {
856 	struct pgrp *pg = p->p_pgrp;
857 
858 	lwkt_gettoken(&p->p_token);
859 	while ((pg = p->p_pgrp) != NULL) {
860 		pgref(pg);
861 		lwkt_gettoken(&pg->pg_token);
862 		if (p->p_pgrp != pg) {
863 			lwkt_reltoken(&pg->pg_token);
864 			pgrel(pg);
865 			continue;
866 		}
867 		p->p_pgrp = NULL;
868 		LIST_REMOVE(p, p_pglist);
869 		lwkt_reltoken(&pg->pg_token);
870 		pgrel(pg);	/* manual pgref */
871 		pgrel(pg);	/* p->p_pgrp ref */
872 		break;
873 	}
874 	lwkt_reltoken(&p->p_token);
875 
876 	return (0);
877 }
878 
879 /*
880  * Adjust the ref count on a session structure.  When the ref count falls to
881  * zero the tty is disassociated from the session and the session structure
882  * is freed.  Note that tty association is not itself ref-counted.
883  *
884  * No requirements.
885  */
886 void
887 sess_hold(struct session *sp)
888 {
889 	atomic_add_int(&sp->s_count, 1);
890 }
891 
892 /*
893  * No requirements.
894  */
895 void
896 sess_rele(struct session *sess)
897 {
898 	procglob_t *prg;
899 	struct tty *tp;
900 	int count;
901 	int n;
902 
903 	n = SESS_HASH(sess->s_sid);
904 	prg = &procglob[n];
905 
906 	for (;;) {
907 		count = sess->s_count;
908 		cpu_ccfence();
909 		KKASSERT(count > 0);
910 		if (count == 1) {
911 			lwkt_gettoken(&prg->proc_token);
912 			if (atomic_cmpset_int(&sess->s_count, 1, 0))
913 				break;
914 			lwkt_reltoken(&prg->proc_token);
915 			/* retry */
916 		} else {
917 			if (atomic_cmpset_int(&sess->s_count, count, count - 1))
918 				return;
919 			/* retry */
920 		}
921 	}
922 
923 	/*
924 	 * Successful 1->0 transition and prg->proc_token is held.
925 	 */
926 	LIST_REMOVE(sess, s_list);
927 	if (pid_doms[sess->s_sid % PIDSEL_DOMAINS] != (uint8_t)time_second)
928 		pid_doms[sess->s_sid % PIDSEL_DOMAINS] = (uint8_t)time_second;
929 
930 	if (sess->s_ttyp && sess->s_ttyp->t_session) {
931 #ifdef TTY_DO_FULL_CLOSE
932 		/* FULL CLOSE, see ttyclearsession() */
933 		KKASSERT(sess->s_ttyp->t_session == sess);
934 		sess->s_ttyp->t_session = NULL;
935 #else
936 		/* HALF CLOSE, see ttyclearsession() */
937 		if (sess->s_ttyp->t_session == sess)
938 			sess->s_ttyp->t_session = NULL;
939 #endif
940 	}
941 	if ((tp = sess->s_ttyp) != NULL) {
942 		sess->s_ttyp = NULL;
943 		ttyunhold(tp);
944 	}
945 	lwkt_reltoken(&prg->proc_token);
946 
947 	kfree(sess, M_SESSION);
948 }
949 
950 /*
951  * Adjust pgrp jobc counters when specified process changes process group.
952  * We count the number of processes in each process group that "qualify"
953  * the group for terminal job control (those with a parent in a different
954  * process group of the same session).  If that count reaches zero, the
955  * process group becomes orphaned.  Check both the specified process'
956  * process group and that of its children.
957  * entering == 0 => p is leaving specified group.
958  * entering == 1 => p is entering specified group.
959  *
960  * No requirements.
961  */
962 void
963 fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
964 {
965 	struct pgrp *hispgrp;
966 	struct session *mysession;
967 	struct proc *np;
968 
969 	/*
970 	 * Check p's parent to see whether p qualifies its own process
971 	 * group; if so, adjust count for p's process group.
972 	 */
973 	lwkt_gettoken(&p->p_token);	/* p_children scan */
974 	lwkt_gettoken(&pgrp->pg_token);
975 
976 	mysession = pgrp->pg_session;
977 	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
978 	    hispgrp->pg_session == mysession) {
979 		if (entering)
980 			pgrp->pg_jobc++;
981 		else if (--pgrp->pg_jobc == 0)
982 			orphanpg(pgrp);
983 	}
984 
985 	/*
986 	 * Check this process' children to see whether they qualify
987 	 * their process groups; if so, adjust counts for children's
988 	 * process groups.
989 	 */
990 	LIST_FOREACH(np, &p->p_children, p_sibling) {
991 		PHOLD(np);
992 		lwkt_gettoken(&np->p_token);
993 		if ((hispgrp = np->p_pgrp) != pgrp &&
994 		    hispgrp->pg_session == mysession &&
995 		    np->p_stat != SZOMB) {
996 			pgref(hispgrp);
997 			lwkt_gettoken(&hispgrp->pg_token);
998 			if (entering)
999 				hispgrp->pg_jobc++;
1000 			else if (--hispgrp->pg_jobc == 0)
1001 				orphanpg(hispgrp);
1002 			lwkt_reltoken(&hispgrp->pg_token);
1003 			pgrel(hispgrp);
1004 		}
1005 		lwkt_reltoken(&np->p_token);
1006 		PRELE(np);
1007 	}
1008 	KKASSERT(pgrp->pg_refs > 0);
1009 	lwkt_reltoken(&pgrp->pg_token);
1010 	lwkt_reltoken(&p->p_token);
1011 }
1012 
1013 /*
1014  * A process group has become orphaned; if there are any stopped
1015  * processes in the group, hang up all the processes in that
1016  * group.
1017  *
1018  * The caller must hold pg_token.
1019  */
1020 static void
1021 orphanpg(struct pgrp *pg)
1022 {
1023 	struct proc *p;
1024 
1025 	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
1026 		if (p->p_stat == SSTOP) {
1027 			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
1028 				ksignal(p, SIGHUP);
1029 				ksignal(p, SIGCONT);
1030 			}
1031 			return;
1032 		}
1033 	}
1034 }
1035 
1036 /*
1037  * Add a new process to the allproc list and the PID hash.  This
1038  * also assigns a pid to the new process.
1039  *
1040  * No requirements.
1041  */
1042 void
1043 proc_add_allproc(struct proc *p)
1044 {
1045 	int random_offset;
1046 
1047 	if ((random_offset = randompid) != 0) {
1048 		read_random(&random_offset, sizeof(random_offset), 1);
1049 		random_offset = (random_offset & 0x7FFFFFFF) % randompid;
1050 	}
1051 	proc_makepid(p, random_offset);
1052 }
1053 
1054 /*
1055  * Calculate a new process pid.  This function is integrated into
1056  * proc_add_allproc() to guarantee that the new pid is not reused before
1057  * the new process can be added to the allproc list.
1058  *
1059  * p_pid is assigned and the process is added to the allproc hash table.
1060  *
1061  * WARNING! We need to allocate PIDs sequentially during early boot.
1062  *	    In particular, init needs to have a pid of 1.
1063  */
1064 static
1065 void
1066 proc_makepid(struct proc *p, int random_offset)
1067 {
1068 	static pid_t nextpid = 1;	/* heuristic, allowed to race */
1069 	procglob_t *prg;
1070 	struct pgrp *pg;
1071 	struct proc *ps;
1072 	struct session *sess;
1073 	pid_t base;
1074 	int8_t delta8;
1075 	int retries;
1076 	int n;
1077 
1078 	/*
1079 	 * Select the next pid base candidate.
1080 	 *
1081 	 * Check for pid wrap-around, and do not allow a pid < 100.
1082 	 */
1083 	retries = 0;
1084 retry:
1085 	base = atomic_fetchadd_int(&nextpid, 1) + random_offset;
1086 	if (base <= 0 || base >= PID_MAX) {
1087 		base = base % PID_MAX;
1088 		if (base < 0)
1089 			base = 100;
1090 		if (base < 100)
1091 			base += 100;
1092 		nextpid = base;		/* reset (SMP race ok) */
1093 	}
1094 
1095 	/*
1096 	 * Do not allow a base pid to be selected from a domain that has
1097 	 * recently seen a pid/pgid/sessid reap.  Sleep a little if we looped
1098 	 * through all available domains.
1099 	 *
1100 	 * WARNING: We want the early pids to be allocated linearly,
1101 	 *	    particularly pid 1 and pid 2.
1102 	 */
1103 	if (++retries >= PIDSEL_DOMAINS)
1104 		tsleep(&nextpid, 0, "makepid", 1);
1105 	if (base >= 100) {
1106 		delta8 = (int8_t)time_second -
1107 			 (int8_t)pid_doms[base % PIDSEL_DOMAINS];
1108 		if (delta8 >= 0 && delta8 <= PIDDOM_DELAY) {
1109 			++pid_domain_skips;
1110 			goto retry;
1111 		}
1112 	}
1113 
1114 	/*
1115 	 * Calculate a hash index and find an unused process id within
1116 	 * the table, looping if we cannot find one.
1117 	 *
1118 	 * The inner loop increments by ALLPROC_HSIZE which keeps the
1119 	 * PID at the same pid_doms[] index as well as the same hash index.
1120 	 */
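	/*
	 * Worked example (illustrative): base 12345 hashes to bucket
	 * 12345 & 255 = 57.  If 12345 is already in use, the inner loop
	 * tries 12601, 12857, ..., all of which hash to bucket 57 as well.
	 */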
1121 	n = ALLPROC_HASH(base);
1122 	prg = &procglob[n];
1123 	lwkt_gettoken(&prg->proc_token);
1124 
1125 restart1:
1126 	LIST_FOREACH(ps, &prg->allproc, p_list) {
1127 		if (ps->p_pid == base) {
1128 			base += ALLPROC_HSIZE;
1129 			if (base >= PID_MAX) {
1130 				lwkt_reltoken(&prg->proc_token);
1131 				goto retry;
1132 			}
1133 			++pid_inner_skips;
1134 			goto restart1;
1135 		}
1136 	}
1137 	LIST_FOREACH(pg, &prg->allpgrp, pg_list) {
1138 		if (pg->pg_id == base) {
1139 			base += ALLPROC_HSIZE;
1140 			if (base >= PID_MAX) {
1141 				lwkt_reltoken(&prg->proc_token);
1142 				goto retry;
1143 			}
1144 			++pid_inner_skips;
1145 			goto restart1;
1146 		}
1147 	}
1148 	LIST_FOREACH(sess, &prg->allsess, s_list) {
1149 		if (sess->s_sid == base) {
1150 			base += ALLPROC_HSIZE;
1151 			if (base >= PID_MAX) {
1152 				lwkt_reltoken(&prg->proc_token);
1153 				goto retry;
1154 			}
1155 			++pid_inner_skips;
1156 			goto restart1;
1157 		}
1158 	}
1159 
1160 	/*
1161 	 * Assign the pid and insert the process.
1162 	 */
1163 	p->p_pid = base;
1164 	LIST_INSERT_HEAD(&prg->allproc, p, p_list);
1165 	lwkt_reltoken(&prg->proc_token);
1166 }
1167 
1168 /*
1169  * Called from exit1 to place the process into a zombie state.
1170  * p_stat is set to SZOMB, so normal pfind[n]() calls will no longer
1171  * find it.  The process remains on the allproc hash until it is reaped.
1172  *
1173  * Caller must hold p->p_token.  We are required to wait until p_lock
1174  * becomes zero before we can manipulate the list, allowing allproc
1175  * scans to guarantee consistency during a list scan.
1176  */
1177 void
1178 proc_move_allproc_zombie(struct proc *p)
1179 {
1180 	procglob_t *prg;
1181 	int n;
1182 
1183 	n = ALLPROC_HASH(p->p_pid);
1184 	prg = &procglob[n];
1185 	PSTALL(p, "reap1", 0);
1186 	lwkt_gettoken(&prg->proc_token);
1187 
1188 	PSTALL(p, "reap1a", 0);
1189 	p->p_stat = SZOMB;
1190 
1191 	lwkt_reltoken(&prg->proc_token);
1192 	dsched_exit_proc(p);
1193 }
1194 
1195 /*
1196  * This routine is called from kern_wait() and will remove the process
1197  * from the allproc hash and the sibling list.  This routine will block
1198  * if someone has a hold on the process (p_lock).
1199  *
1200  * Caller must hold p->p_token.  We are required to wait until p_lock
1201  * becomes one before we can manipulate the list, allowing allproc
1202  * scans to guarantee consistency during a list scan.
1203  *
1204  * Assumes caller has one ref.
1205  */
1206 void
1207 proc_remove_zombie(struct proc *p)
1208 {
1209 	procglob_t *prg;
1210 	int n;
1211 
1212 	n = ALLPROC_HASH(p->p_pid);
1213 	prg = &procglob[n];
1214 
1215 	PSTALL(p, "reap2", 1);
1216 	lwkt_gettoken(&prg->proc_token);
1217 	PSTALL(p, "reap2a", 1);
1218 	LIST_REMOVE(p, p_list);		/* remove from master list */
1219 	LIST_REMOVE(p, p_sibling);	/* and from sibling list */
1220 	p->p_pptr = NULL;
1221 	p->p_ppid = 0;
1222 	if (pid_doms[p->p_pid % PIDSEL_DOMAINS] != (uint8_t)time_second)
1223 		pid_doms[p->p_pid % PIDSEL_DOMAINS] = (uint8_t)time_second;
1224 	lwkt_reltoken(&prg->proc_token);
1225 }
1226 
1227 /*
1228  * Handle various requirements prior to returning to usermode.  Called from
1229  * platform trap and system call code.
1230  */
1231 void
1232 lwpuserret(struct lwp *lp)
1233 {
1234 	struct proc *p = lp->lwp_proc;
1235 
1236 	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
1237 		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
1238 		allocvnode_gc();
1239 	}
1240 	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
1241 		lwkt_gettoken(&p->p_token);
1242 		lwp_exit(0, NULL);
1243 		lwkt_reltoken(&p->p_token);     /* NOT REACHED */
1244 	}
1245 }
1246 
1247 /*
1248  * Kernel threads running on behalf of user processes can accumulate deferred
1249  * actions which need to be acted upon.  Callers include:
1250  *
1251  * nfsd		- Can allocate lots of vnodes
1252  */
1253 void
1254 lwpkthreaddeferred(void)
1255 {
1256 	struct lwp *lp = curthread->td_lwp;
1257 
1258 	if (lp) {
1259 		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
1260 			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
1261 			allocvnode_gc();
1262 		}
1263 	}
1264 }
1265 
1266 void
1267 proc_usermap(struct proc *p, int invfork)
1268 {
1269 	struct sys_upmap *upmap;
1270 
1271 	lwkt_gettoken(&p->p_token);
1272 	upmap = kmalloc(roundup2(sizeof(*upmap), PAGE_SIZE), M_UPMAP,
1273 			M_WAITOK | M_ZERO);
1274 	if (p->p_upmap == NULL && (p->p_flags & P_POSTEXIT) == 0) {
1275 		upmap->header[0].type = UKPTYPE_VERSION;
1276 		upmap->header[0].offset = offsetof(struct sys_upmap, version);
1277 		upmap->header[1].type = UPTYPE_RUNTICKS;
1278 		upmap->header[1].offset = offsetof(struct sys_upmap, runticks);
1279 		upmap->header[2].type = UPTYPE_FORKID;
1280 		upmap->header[2].offset = offsetof(struct sys_upmap, forkid);
1281 		upmap->header[3].type = UPTYPE_PID;
1282 		upmap->header[3].offset = offsetof(struct sys_upmap, pid);
1283 		upmap->header[4].type = UPTYPE_PROC_TITLE;
1284 		upmap->header[4].offset = offsetof(struct sys_upmap,proc_title);
1285 		upmap->header[5].type = UPTYPE_INVFORK;
1286 		upmap->header[5].offset = offsetof(struct sys_upmap, invfork);
1287 
1288 		upmap->version = UPMAP_VERSION;
1289 		upmap->pid = p->p_pid;
1290 		upmap->forkid = p->p_forkid;
1291 		upmap->invfork = invfork;
1292 		p->p_upmap = upmap;
1293 	} else {
1294 		kfree(upmap, M_UPMAP);
1295 	}
1296 	lwkt_reltoken(&p->p_token);
1297 }
1298 
1299 void
1300 proc_userunmap(struct proc *p)
1301 {
1302 	struct sys_upmap *upmap;
1303 
1304 	lwkt_gettoken(&p->p_token);
1305 	if ((upmap = p->p_upmap) != NULL) {
1306 		p->p_upmap = NULL;
1307 		kfree(upmap, M_UPMAP);
1308 	}
1309 	lwkt_reltoken(&p->p_token);
1310 }
1311 
1312 /*
1313  * Called when the per-thread user/kernel shared page needs to be
1314  * allocated.  The function refuses to allocate the page if the
1315  * thread is exiting to avoid races against lwp_userunmap().
1316  */
1317 void
1318 lwp_usermap(struct lwp *lp, int invfork)
1319 {
1320 	struct sys_lpmap *lpmap;
1321 
1322 	lwkt_gettoken(&lp->lwp_token);
1323 
1324 	lpmap = kmalloc(roundup2(sizeof(*lpmap), PAGE_SIZE), M_UPMAP,
1325 			M_WAITOK | M_ZERO);
1326 	if (lp->lwp_lpmap == NULL && (lp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
1327 		lpmap->header[0].type = UKPTYPE_VERSION;
1328 		lpmap->header[0].offset = offsetof(struct sys_lpmap, version);
1329 		lpmap->header[1].type = LPTYPE_BLOCKALLSIGS;
1330 		lpmap->header[1].offset = offsetof(struct sys_lpmap,
1331 						   blockallsigs);
1332 		lpmap->header[2].type = LPTYPE_THREAD_TITLE;
1333 		lpmap->header[2].offset = offsetof(struct sys_lpmap,
1334 						   thread_title);
1335 		lpmap->header[3].type = LPTYPE_THREAD_TID;
1336 		lpmap->header[3].offset = offsetof(struct sys_lpmap, tid);
1337 
1338 		lpmap->version = LPMAP_VERSION;
1339 		lpmap->tid = lp->lwp_tid;
1340 		lp->lwp_lpmap = lpmap;
1341 	} else {
1342 		kfree(lpmap, M_UPMAP);
1343 	}
1344 	lwkt_reltoken(&lp->lwp_token);
1345 }
1346 
1347 /*
1348  * Called when a LWP (but not necessarily the whole process) exits.
1349  * Called when a process execs (after all other threads have been killed).
1350  *
1351  * lwp-specific mappings must be removed.  If userland didn't do it, then
1352  * we have to.  Otherwise we could end up disclosing kernel memory due to
1353  * the ad-hoc pmap mapping.
1354  */
1355 void
1356 lwp_userunmap(struct lwp *lp)
1357 {
1358 	struct sys_lpmap *lpmap;
1359 	struct vm_map *map;
1360 	struct vm_map_backing *ba;
1361 	struct vm_map_backing copy;
1362 
1363 	lwkt_gettoken(&lp->lwp_token);
1364 	map = &lp->lwp_proc->p_vmspace->vm_map;
1365 	lpmap = lp->lwp_lpmap;
1366 	lp->lwp_lpmap = NULL;
1367 
1368 	spin_lock(&lp->lwp_spin);
1369 	while ((ba = TAILQ_FIRST(&lp->lwp_lpmap_backing_list)) != NULL) {
1370 		copy = *ba;
1371 		spin_unlock(&lp->lwp_spin);
1372 
1373 		lwkt_gettoken(&map->token);
1374 		vm_map_remove(map, copy.start, copy.end);
1375 		lwkt_reltoken(&map->token);
1376 
1377 		spin_lock(&lp->lwp_spin);
1378 	}
1379 	spin_unlock(&lp->lwp_spin);
1380 
1381 	if (lpmap)
1382 		kfree(lpmap, M_UPMAP);
1383 	lwkt_reltoken(&lp->lwp_token);
1384 }
1385 
1386 /*
1387  * Scan all processes on the allproc list.  The process is automatically
1388  * held for the callback.  A return value of -1 terminates the loop.
1389  * Zombie procs are skipped.
1390  *
1391  * The callback is made with the process held and proc_token held.
1392  *
1393  * We limit the scan to the number of processes as of the start of
1394  * the scan so as not to get caught up in an endless loop if new processes
1395  * are created more quickly than we can scan the old ones.  Add a little
1396  * slop to try to catch edge cases since nprocs can race.
1397  *
1398  * No requirements.
1399  */
1400 void
1401 allproc_scan(int (*callback)(struct proc *, void *), void *data, int segmented)
1402 {
1403 	int limit = nprocs + ncpus;
1404 	struct proc *p;
1405 	int ns;
1406 	int ne;
1407 	int r;
1408 	int n;
1409 
1410 	if (segmented) {
1411 		int id = mycpu->gd_cpuid;
1412 		ns = id * ALLPROC_HSIZE / ncpus;
1413 		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
1414 	} else {
1415 		ns = 0;
1416 		ne = ALLPROC_HSIZE;
1417 	}
1418 
1419 	/*
1420 	 * prg->proc_token protects the allproc list and PHOLD() prevents the
1421 	 * process from being removed from the allproc list or the zombproc
1422 	 * list.
1423 	 */
1424 	for (n = ns; n < ne; ++n) {
1425 		procglob_t *prg = &procglob[n];
1426 		if (LIST_FIRST(&prg->allproc) == NULL)
1427 			continue;
1428 		lwkt_gettoken(&prg->proc_token);
1429 		LIST_FOREACH(p, &prg->allproc, p_list) {
1430 			if (p->p_stat == SZOMB)
1431 				continue;
1432 			PHOLD(p);
1433 			r = callback(p, data);
1434 			PRELE(p);
1435 			if (r < 0)
1436 				break;
1437 			if (--limit < 0)
1438 				break;
1439 		}
1440 		lwkt_reltoken(&prg->proc_token);
1441 
1442 		/*
1443 		 * Check if asked to stop early
1444 		 */
1445 		if (p)
1446 			break;
1447 	}
1448 }
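
/*
 * Illustrative allproc_scan() callback (sketch only, names are made up):
 *
 *	static int
 *	find_stopped_cb(struct proc *p, void *data)
 *	{
 *		if (p->p_stat == SSTOP) {
 *			*(pid_t *)data = p->p_pid;
 *			return -1;		-1 terminates the scan
 *		}
 *		return 0;			anything else continues
 *	}
 *
 *	pid_t spid = -1;
 *	allproc_scan(find_stopped_cb, &spid, 0);
 */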
1449 
1450 /*
1451  * Scan all lwps of processes on the allproc list.  The lwp is automatically
1452  * held for the callback.  A return value of -1 terminates the loop.
1453  *
1454  * The callback is made with the process and lwp both held, and proc_token held.
1455  *
1456  * No requirements.
1457  */
1458 void
1459 alllwp_scan(int (*callback)(struct lwp *, void *), void *data, int segmented)
1460 {
1461 	struct proc *p;
1462 	struct lwp *lp;
1463 	int ns;
1464 	int ne;
1465 	int r = 0;
1466 	int n;
1467 
1468 	if (segmented) {
1469 		int id = mycpu->gd_cpuid;
1470 		ns = id * ALLPROC_HSIZE / ncpus;
1471 		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
1472 	} else {
1473 		ns = 0;
1474 		ne = ALLPROC_HSIZE;
1475 	}
1476 
1477 	for (n = ns; n < ne; ++n) {
1478 		procglob_t *prg = &procglob[n];
1479 
1480 		if (LIST_FIRST(&prg->allproc) == NULL)
1481 			continue;
1482 		lwkt_gettoken(&prg->proc_token);
1483 		LIST_FOREACH(p, &prg->allproc, p_list) {
1484 			if (p->p_stat == SZOMB)
1485 				continue;
1486 			PHOLD(p);
1487 			lwkt_gettoken(&p->p_token);
1488 			FOREACH_LWP_IN_PROC(lp, p) {
1489 				LWPHOLD(lp);
1490 				r = callback(lp, data);
1491 				LWPRELE(lp);
1492 			}
1493 			lwkt_reltoken(&p->p_token);
1494 			PRELE(p);
1495 			if (r < 0)
1496 				break;
1497 		}
1498 		lwkt_reltoken(&prg->proc_token);
1499 
1500 		/*
1501 		 * Asked to exit early
1502 		 */
1503 		if (p)
1504 			break;
1505 	}
1506 }
1507 
1508 /*
1509  * Scan all zombie processes.  The process is automatically
1510  * held for the callback.  A return value of -1 terminates the loop.
1511  *
1512  * No requirements.
1513  * The callback is made with the process held and proc_token held.
1514  */
1515 void
1516 zombproc_scan(int (*callback)(struct proc *, void *), void *data)
1517 {
1518 	struct proc *p;
1519 	int r;
1520 	int n;
1521 
1522 	/*
1523 	 * prg->proc_token protects the allproc list and PHOLD() prevents the
1524 	 * process from being removed from the allproc list or the zombproc
1525 	 * list.
1526 	 */
1527 	for (n = 0; n < ALLPROC_HSIZE; ++n) {
1528 		procglob_t *prg = &procglob[n];
1529 
1530 		if (LIST_FIRST(&prg->allproc) == NULL)
1531 			continue;
1532 		lwkt_gettoken(&prg->proc_token);
1533 		LIST_FOREACH(p, &prg->allproc, p_list) {
1534 			if (p->p_stat != SZOMB)
1535 				continue;
1536 			PHOLD(p);
1537 			r = callback(p, data);
1538 			PRELE(p);
1539 			if (r < 0)
1540 				break;
1541 		}
1542 		lwkt_reltoken(&prg->proc_token);
1543 
1544 		/*
1545 		 * Check if asked to stop early
1546 		 */
1547 		if (p)
1548 			break;
1549 	}
1550 }
1551 
1552 #include "opt_ddb.h"
1553 #ifdef DDB
1554 #include <ddb/ddb.h>
1555 
1556 /*
1557  * Debugging only
1558  */
1559 DB_SHOW_COMMAND(pgrpdump, pgrpdump)
1560 {
1561 	struct pgrp *pgrp;
1562 	struct proc *p;
1563 	procglob_t *prg;
1564 	int i;
1565 
1566 	for (i = 0; i < ALLPROC_HSIZE; ++i) {
1567 		prg = &procglob[i];
1568 
1569 		if (LIST_EMPTY(&prg->allpgrp))
1570 			continue;
1571 		kprintf("\tindx %d\n", i);
1572 		LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
1573 			kprintf("\tpgrp %p, pgid %ld, sess %p, "
1574 				"sesscnt %d, mem %p\n",
1575 				(void *)pgrp, (long)pgrp->pg_id,
1576 				(void *)pgrp->pg_session,
1577 				pgrp->pg_session->s_count,
1578 				(void *)LIST_FIRST(&pgrp->pg_members));
1579 			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1580 				kprintf("\t\tpid %ld addr %p pgrp %p\n",
1581 					(long)p->p_pid, (void *)p,
1582 					(void *)p->p_pgrp);
1583 			}
1584 		}
1585 	}
1586 }
1587 #endif /* DDB */
1588 
1589 /*
1590  * The caller must hold proc_token.
1591  */
1592 static int
1593 sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
1594 {
1595 	struct kinfo_proc ki;
1596 	struct lwp *lp;
1597 	int skp = 0, had_output = 0;
1598 	int error;
1599 
1600 	bzero(&ki, sizeof(ki));
1601 	lwkt_gettoken_shared(&p->p_token);
1602 	fill_kinfo_proc(p, &ki);
1603 	if ((flags & KERN_PROC_FLAG_LWP) == 0)
1604 		skp = 1;
1605 	error = 0;
1606 	FOREACH_LWP_IN_PROC(lp, p) {
1607 		LWPHOLD(lp);
1608 		fill_kinfo_lwp(lp, &ki.kp_lwp);
1609 		had_output = 1;
1610 		if (skp == 0) {
1611 			error = SYSCTL_OUT(req, &ki, sizeof(ki));
1612 			bzero(&ki.kp_lwp, sizeof(ki.kp_lwp));
1613 		}
1614 		LWPRELE(lp);
1615 		if (error)
1616 			break;
1617 	}
1618 	lwkt_reltoken(&p->p_token);
1619 
1620 	/*
1621 	 * If aggregating threads, set the tid field to -1.
1622 	 */
1623 	if (skp)
1624 		ki.kp_lwp.kl_tid = -1;
1625 
1626 	/*
1627 	 * We need to output at least the proc, even if there is no lwp.
1628 	 * If skp is non-zero we aggregated the lwps and need to output
1629 	 * the result.
1630 	 */
1631 	if (had_output == 0 || skp) {
1632 		error = SYSCTL_OUT(req, &ki, sizeof(ki));
1633 	}
1634 	return (error);
1635 }
1636 
1637 /*
1638  * The caller must hold proc_token.
1639  */
1640 static int
1641 sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
1642 {
1643 	struct kinfo_proc ki;
1644 	int error;
1645 
1646 	fill_kinfo_proc_kthread(td, &ki);
1647 	error = SYSCTL_OUT(req, &ki, sizeof(ki));
1648 	if (error)
1649 		return error;
1650 	return(0);
1651 }
1652 
1653 /*
1654  * No requirements.
1655  */
1656 static int
1657 sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
1658 {
1659 	int *name = (int *)arg1;
1660 	int oid = oidp->oid_number;
1661 	u_int namelen = arg2;
1662 	struct proc *p;
1663 	struct thread *td;
1664 	struct thread *marker;
1665 	int flags = 0;
1666 	int error = 0;
1667 	int n;
1668 	int origcpu;
1669 	struct ucred *cr1 = curproc->p_ucred;
1670 	struct ucred *crcache = NULL;
1671 
1672 	flags = oid & KERN_PROC_FLAGMASK;
1673 	oid &= ~KERN_PROC_FLAGMASK;
1674 
1675 	if ((oid == KERN_PROC_ALL && namelen != 0) ||
1676 	    (oid != KERN_PROC_ALL && namelen != 1)) {
1677 		return (EINVAL);
1678 	}
1679 
1680 	/*
1681 	 * proc_token protects the allproc list and PHOLD() prevents the
1682 	 * process from being removed from the allproc list or the zombproc
1683 	 * list.
1684 	 */
1685 	if (oid == KERN_PROC_PID) {
1686 		p = pfind((pid_t)name[0]);
1687 		if (p) {
1688 			crcache = pcredcache(crcache, p);
1689 			if (PRISON_CHECK(cr1, crcache))
1690 				error = sysctl_out_proc(p, req, flags);
1691 			PRELE(p);
1692 		}
1693 		goto post_threads;
1694 	}
1695 	p = NULL;
1696 
1697 	if (!req->oldptr) {
1698 		/* overestimate by 5 procs */
1699 		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
1700 		if (error)
1701 			goto post_threads;
1702 	}
1703 
1704 	for (n = 0; n < ALLPROC_HSIZE; ++n) {
1705 		procglob_t *prg = &procglob[n];
1706 
1707 		if (LIST_EMPTY(&prg->allproc))
1708 			continue;
1709 		lwkt_gettoken_shared(&prg->proc_token);
1710 		LIST_FOREACH(p, &prg->allproc, p_list) {
1711 			/*
1712 			 * Show a user only their processes.
1713 			 */
1714 			if (ps_showallprocs == 0) {
1715 				crcache = pcredcache(crcache, p);
1716 				if (crcache == NULL ||
1717 				    p_trespass(cr1, crcache)) {
1718 					continue;
1719 				}
1720 			}
1721 
1722 			/*
1723 			 * Skip embryonic processes.
1724 			 */
1725 			if (p->p_stat == SIDL)
1726 				continue;
1727 			/*
1728 			 * TODO - make more efficient (see notes below).
1729 			 * do by session.
1730 			 */
1731 			switch (oid) {
1732 			case KERN_PROC_PGRP:
1733 				/* could do this by traversing pgrp */
1734 				if (p->p_pgrp == NULL ||
1735 				    p->p_pgrp->pg_id != (pid_t)name[0])
1736 					continue;
1737 				break;
1738 
1739 			case KERN_PROC_TTY:
1740 				if ((p->p_flags & P_CONTROLT) == 0 ||
1741 				    p->p_session == NULL ||
1742 				    p->p_session->s_ttyp == NULL ||
1743 				    devid_from_dev(p->p_session->s_ttyp->t_dev) !=
1744 					(dev_t)name[0])
1745 					continue;
1746 				break;
1747 
1748 			case KERN_PROC_UID:
1749 				crcache = pcredcache(crcache, p);
1750 				if (crcache == NULL ||
1751 				    crcache->cr_uid != (uid_t)name[0]) {
1752 					continue;
1753 				}
1754 				break;
1755 
1756 			case KERN_PROC_RUID:
1757 				crcache = pcredcache(crcache, p);
1758 				if (crcache == NULL ||
1759 				    crcache->cr_ruid != (uid_t)name[0]) {
1760 					continue;
1761 				}
1762 				break;
1763 			}
1764 
1765 			crcache = pcredcache(crcache, p);
1766 			if (!PRISON_CHECK(cr1, crcache))
1767 				continue;
1768 			PHOLD(p);
1769 			error = sysctl_out_proc(p, req, flags);
1770 			PRELE(p);
1771 			if (error) {
1772 				lwkt_reltoken(&prg->proc_token);
1773 				goto post_threads;
1774 			}
1775 		}
1776 		lwkt_reltoken(&prg->proc_token);
1777 	}
1778 
1779 	/*
1780 	 * Iterate over all active cpus and scan their thread list.  Start
1781 	 * with the next logical cpu and end with our original cpu.  We
1782 	 * migrate our own thread to each target cpu in order to safely scan
1783 	 * its thread list.  In the last loop we migrate back to our original
1784 	 * cpu.
1785 	 */
1786 	origcpu = mycpu->gd_cpuid;
1787 	if (!ps_showallthreads || jailed(cr1))
1788 		goto post_threads;
1789 
1790 	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
1791 	marker->td_flags = TDF_MARKER;
1792 	error = 0;
1793 
1794 	for (n = 1; n <= ncpus; ++n) {
1795 		globaldata_t rgd;
1796 		int nid;
1797 
1798 		nid = (origcpu + n) % ncpus;
1799 		if (CPUMASK_TESTBIT(smp_active_mask, nid) == 0)
1800 			continue;
1801 		rgd = globaldata_find(nid);
1802 		lwkt_setcpu_self(rgd);
1803 
1804 		crit_enter();
1805 		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);
1806 
1807 		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
1808 			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
1809 			TAILQ_INSERT_BEFORE(td, marker, td_allq);
1810 			if (td->td_flags & TDF_MARKER)
1811 				continue;
1812 			if (td->td_proc)
1813 				continue;
1814 
1815 			lwkt_hold(td);
1816 			crit_exit();
1817 
1818 			switch (oid) {
1819 			case KERN_PROC_PGRP:
1820 			case KERN_PROC_TTY:
1821 			case KERN_PROC_UID:
1822 			case KERN_PROC_RUID:
1823 				break;
1824 			default:
1825 				error = sysctl_out_proc_kthread(td, req);
1826 				break;
1827 			}
1828 			lwkt_rele(td);
1829 			crit_enter();
1830 			if (error)
1831 				break;
1832 		}
1833 		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
1834 		crit_exit();
1835 
1836 		if (error)
1837 			break;
1838 	}
1839 
1840 	/*
1841 	 * Userland scheduler expects us to return on the same cpu we
1842 	 * started on.
1843 	 */
1844 	if (mycpu->gd_cpuid != origcpu)
1845 		lwkt_setcpu_self(globaldata_find(origcpu));
1846 
1847 	kfree(marker, M_TEMP);
1848 
1849 post_threads:
1850 	if (crcache)
1851 		crfree(crcache);
1852 	return (error);
1853 }
1854 
1855 /*
1856  * This sysctl allows a process to retrieve the argument list or process
1857  * title for another process without groping around in the address space
1858  * of the other process.  It also allows a process to set its own "process
1859  * title" to a string of its own choice.
1860  *
1861  * No requirements.
1862  */
1863 static int
1864 sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
1865 {
1866 	int *name = (int*) arg1;
1867 	u_int namelen = arg2;
1868 	size_t n;
1869 	struct proc *p;
1870 	struct lwp *lp;
1871 #if 0
1872 	struct pargs *opa;
1873 #endif
1874 	struct pargs *pa;
1875 	int error = 0;
1876 	struct ucred *cr1 = curproc->p_ucred;
1877 
1878 	if (namelen != 1 && namelen != 2)
1879 		return (EINVAL);
1880 
1881 	lp = NULL;
1882 	p = pfind((pid_t)name[0]);
1883 	if (p == NULL)
1884 		goto done;
1885 	lwkt_gettoken(&p->p_token);
1886 
1887 	if (namelen == 2) {
1888 		lp = lwpfind(p, (lwpid_t)name[1]);
1889 		if (lp)
1890 			lwkt_gettoken(&lp->lwp_token);
1891 	} else {
1892 		lp = NULL;
1893 	}
1894 
1895 	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
1896 		goto done;
1897 
1898 	if (req->newptr && curproc != p) {
1899 		error = EPERM;
1900 		goto done;
1901 	}
1902 	if (req->oldptr) {
1903 		if (lp && lp->lwp_lpmap != NULL &&
1904 		    lp->lwp_lpmap->thread_title[0]) {
1905 			/*
1906 			 * Args set via writable user thread mmap or
1907 			 * sysctl().
1908 			 *
1909 			 * We must calculate the string length manually
1910 			 * because the user data can change at any time.
1911 			 */
1912 			size_t n;
1913 			char *base;
1914 
1915 			base = lp->lwp_lpmap->thread_title;
1916 			for (n = 0; n < LPMAP_MAXTHREADTITLE - 1; ++n) {
1917 				if (base[n] == 0)
1918 					break;
1919 			}
1920 			error = SYSCTL_OUT(req, base, n);
1921 			if (error == 0)
1922 				error = SYSCTL_OUT(req, "", 1);
1923 		} else if (p->p_upmap != NULL && p->p_upmap->proc_title[0]) {
1924 			/*
1925 			 * Args set via writable user process mmap or
1926 			 * sysctl().
1927 			 *
1928 			 * We must calculate the string length manually
1929 			 * because the user data can change at any time.
1930 			 */
1931 			size_t n;
1932 			char *base;
1933 
1934 			base = p->p_upmap->proc_title;
1935 			for (n = 0; n < UPMAP_MAXPROCTITLE - 1; ++n) {
1936 				if (base[n] == 0)
1937 					break;
1938 			}
1939 			error = SYSCTL_OUT(req, base, n);
1940 			if (error == 0)
1941 				error = SYSCTL_OUT(req, "", 1);
1942 		} else if ((pa = p->p_args) != NULL) {
1943 			/*
1944 			 * Default/original arguments.
1945 			 */
1946 			refcount_acquire(&pa->ar_ref);
1947 			error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
1948 			if (refcount_release(&pa->ar_ref))
1949 				kfree(pa, M_PARGS);
1950 		}
1951 	}
1952 	if (req->newptr == NULL)
1953 		goto done;
1954 
1955 	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
1956 		goto done;
1957 	}
1958 
1959 	/*
1960 	 * Get the new process or thread title from userland
1961 	 */
1962 	pa = kmalloc(sizeof(struct pargs) + req->newlen,
1963 		     M_PARGS, M_WAITOK);
1964 	refcount_init(&pa->ar_ref, 1);
1965 	pa->ar_length = req->newlen;
1966 	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
1967 	if (error) {
1968 		kfree(pa, M_PARGS);
1969 		goto done;
1970 	}
1971 
1972 	if (lp) {
1973 		/*
1974 		 * Update thread title
1975 		 */
1976 		if (lp->lwp_lpmap == NULL)
1977 			lwp_usermap(lp, -1);
1978 		if (lp->lwp_lpmap) {
1979 			n = req->newlen;
1980 			if (n >= sizeof(lp->lwp_lpmap->thread_title))
1981 				n = sizeof(lp->lwp_lpmap->thread_title) - 1;
1982 			lp->lwp_lpmap->thread_title[n] = 0;
1983 			bcopy(pa->ar_args, lp->lwp_lpmap->thread_title, n);
1984 		}
1985 	} else {
1986 		/*
1987 		 * Update process title
1988 		 */
1989 		if (p->p_upmap == NULL)
1990 			proc_usermap(p, -1);
1991 		if (p->p_upmap) {
1992 			n = req->newlen;
1993 			if (n >= sizeof(p->p_upmap->proc_title))
1994 				n = sizeof(p->p_upmap->proc_title) - 1;
1995 			p->p_upmap->proc_title[n] = 0;
1996 			bcopy(pa->ar_args, p->p_upmap->proc_title, n);
1997 		}
1998 
1999 #if 0
2000 		/*
2001 		 * XXX delete this code, keep original args intact for
2002 		 * the setproctitle("") case.
2003 		 * Scrap p->p_args, p->p_upmap->proc_title[] overrides it.
2004 		 */
2005 		opa = p->p_args;
2006 		p->p_args = NULL;
2007 		if (opa) {
2008 			KKASSERT(opa->ar_ref > 0);
2009 			if (refcount_release(&opa->ar_ref)) {
2010 				kfree(opa, M_PARGS);
2011 				/* opa = NULL; */
2012 			}
2013 		}
2014 #endif
2015 	}
2016 	kfree(pa, M_PARGS);
2017 
2018 done:
2019 	if (lp) {
2020 		lwkt_reltoken(&lp->lwp_token);
2021 		LWPRELE(lp);
2022 	}
2023 	if (p) {
2024 		lwkt_reltoken(&p->p_token);
2025 		PRELE(p);
2026 	}
2027 	return (error);
2028 }
2029 
2030 static int
2031 sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
2032 {
2033 	int *name = (int*) arg1;
2034 	u_int namelen = arg2;
2035 	struct proc *p;
2036 	int error = 0;
2037 	char *fullpath, *freepath;
2038 	struct ucred *cr1 = curproc->p_ucred;
2039 
2040 	if (namelen != 1)
2041 		return (EINVAL);
2042 
2043 	p = pfind((pid_t)name[0]);
2044 	if (p == NULL)
2045 		goto done;
2046 	lwkt_gettoken_shared(&p->p_token);
2047 
2048 	/*
2049 	 * If we are not allowed to see other args, we certainly shouldn't
2050 	 * get the cwd either. Also check the usual trespassing.
2051 	 */
2052 	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
2053 		goto done;
2054 
2055 	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
2056 		struct nchandle nch;
2057 
2058 		cache_copy(&p->p_fd->fd_ncdir, &nch);
2059 		error = cache_fullpath(p, &nch, NULL,
2060 				       &fullpath, &freepath, 0);
2061 		cache_drop(&nch);
2062 		if (error)
2063 			goto done;
2064 		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
2065 		kfree(freepath, M_TEMP);
2066 	}
2067 
2068 done:
2069 	if (p) {
2070 		lwkt_reltoken(&p->p_token);
2071 		PRELE(p);
2072 	}
2073 	return (error);
2074 }
2075 
2076 /*
2077  * This sysctl allows a process to retrieve the path of the executable for
2078  * itself or another process.
2079  */
2080 static int
2081 sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
2082 {
2083 	pid_t *pidp = (pid_t *)arg1;
2084 	unsigned int arglen = arg2;
2085 	struct proc *p;
2086 	char *retbuf, *freebuf;
2087 	int error = 0;
2088 	struct nchandle nch;
2089 
2090 	if (arglen != 1)
2091 		return (EINVAL);
2092 	if (*pidp == -1) {	/* -1 means this process */
2093 		p = curproc;
2094 	} else {
2095 		p = pfind(*pidp);
2096 		if (p == NULL)
2097 			return (ESRCH);
2098 	}
2099 	lwkt_gettoken_shared(&p->p_token);	/* deal with exit race */
2100 	if (p->p_textnch.ncp) {
2101 		cache_copy(&p->p_textnch, &nch);
2102 		error = cache_fullpath(p, &nch, NULL, &retbuf, &freebuf, 0);
2103 		cache_drop(&nch);
2104 	} else {
2105 		error = EINVAL;
2106 	}
2107 	lwkt_reltoken(&p->p_token);
2108 	if (error)
2109 		goto done;
2110 	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
2111 	kfree(freebuf, M_TEMP);
2112 done:
2113 	if (*pidp != -1)
2114 		PRELE(p);
2115 
2116 	return (error);
2117 }
2118 
2119 static int
2120 sysctl_kern_proc_sigtramp(SYSCTL_HANDLER_ARGS)
2121 {
2122         /*int *name = (int *)arg1;*/
2123         u_int namelen = arg2;
2124         struct kinfo_sigtramp kst;
2125         const struct sysentvec *sv;
2126         int error;
2127 
2128         if (namelen > 1)
2129                 return (EINVAL);
2130         /* ignore pid if passed in (freebsd compatibility) */
2131 
2132         sv = curproc->p_sysent;
2133         bzero(&kst, sizeof(kst));
2134         if (sv->sv_szsigcode) {
2135 		intptr_t sigbase;
2136 
2137 		sigbase = trunc_page64((intptr_t)PS_STRINGS -
2138 				       *sv->sv_szsigcode);
2139 		sigbase -= SZSIGCODE_EXTRA_BYTES;
2140 
2141                 kst.ksigtramp_start = (void *)sigbase;
2142                 kst.ksigtramp_end = (void *)(sigbase + *sv->sv_szsigcode);
2143         }
2144         error = SYSCTL_OUT(req, &kst, sizeof(kst));
2145 
2146         return (error);
2147 }
2148 
2149 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD,  0, "Process table");
2150 
2151 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all,
2152 	CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK,
2153 	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");
2154 
2155 SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp,
2156 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2157 	sysctl_kern_proc, "Process table");
2158 
2159 SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty,
2160 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2161 	sysctl_kern_proc, "Process table");
2162 
2163 SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid,
2164 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2165 	sysctl_kern_proc, "Process table");
2166 
2167 SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid,
2168 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2169 	sysctl_kern_proc, "Process table");
2170 
2171 SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid,
2172 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2173 	sysctl_kern_proc, "Process table");
2174 
2175 SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
2176 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2177 	sysctl_kern_proc, "Process table");
2178 
2179 SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
2180 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2181 	sysctl_kern_proc, "Process table");
2182 
2183 SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
2184 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2185 	sysctl_kern_proc, "Process table");
2186 
2187 SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
2188 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2189 	sysctl_kern_proc, "Process table");
2190 
2191 SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
2192 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2193 	sysctl_kern_proc, "Process table");
2194 
2195 SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
2196 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2197 	sysctl_kern_proc, "Process table");
2198 
2199 SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
2200 	CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK,
2201 	sysctl_kern_proc_args, "Process argument list");
2202 
2203 SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd,
2204 	CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK,
2205 	sysctl_kern_proc_cwd, "Process current working directory");
2206 
2207 static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname,
2208 	CTLFLAG_RD | CTLFLAG_NOLOCK,
2209 	sysctl_kern_proc_pathname, "Process executable path");
2210 
2211 SYSCTL_PROC(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp,
2212 	CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK,
2213         0, 0, sysctl_kern_proc_sigtramp, "S,sigtramp",
2214         "Return sigtramp address range");
2215