/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <sys/random.h>
#include <sys/exec.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <sys/kinfo.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>

/*
 * Hash table size must be a power of two and is not currently dynamically
 * sized.  There is a trade-off between the linear scans which must iterate
 * all HSIZE elements and the number of elements which might accumulate
 * within each hash chain.
 */
#define ALLPROC_HSIZE	256
#define ALLPROC_HMASK	(ALLPROC_HSIZE - 1)
#define ALLPROC_HASH(pid)	(pid & ALLPROC_HMASK)
#define PGRP_HASH(pid)		(pid & ALLPROC_HMASK)
#define SESS_HASH(pid)		(pid & ALLPROC_HMASK)

/*
 * pid_doms[] management, used to control how quickly a PID can be recycled.
 * Must be a multiple of ALLPROC_HSIZE for the proc_makepid() inner loops.
 *
 * WARNING! PIDDOM_DELAY should not be defined > 20 or so unless you change
 *	    the array from int8_t's to int16_t's.
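 *	    (pid_doms[] stores the low 8 bits of time_second at reap time
 *	    and proc_makepid() compares it using a signed 8-bit delta, so
 *	    delays that approach the 8-bit range become ambiguous once the
 *	    stamp wraps.)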
 */
#define PIDDOM_COUNT	10	/* 10 pids per domain - reduce array size */
#define PIDDOM_DELAY	10	/* min 10 seconds after exit before reuse */
#define PIDDOM_SCALE	10	/* (10,000*SCALE)/sec performance guarantee */
#define PIDSEL_DOMAINS	rounddown(PID_MAX * PIDDOM_SCALE / PIDDOM_COUNT, ALLPROC_HSIZE)

/* Used by libkvm */
int allproc_hsize = ALLPROC_HSIZE;

LIST_HEAD(pidhashhead, proc);

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
MALLOC_DEFINE(M_UPMAP, "upmap", "upmap/kpmap/lpmap structures");

int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
    &ps_showallprocs, 0,
    "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
    &ps_showallthreads, 0,
    "Unprivileged processes can see kernel threads");
static u_int pid_domain_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_domain_skips, CTLFLAG_RW,
    &pid_domain_skips, 0,
    "Number of pid_doms[] skipped");
static u_int pid_inner_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_inner_skips, CTLFLAG_RW,
    &pid_inner_skips, 0,
    "Number of pid_doms[] inner-loop skips");

static void orphanpg(struct pgrp *pg);
static void proc_makepid(struct proc *p, int random_offset);

/*
 * Process related lists (for proc_token, allproc, allpgrp, and allsess)
 */
typedef struct procglob procglob_t;

static procglob_t	procglob[ALLPROC_HSIZE];

/*
 * We try our best to avoid recycling a PID too quickly.  We do this by
 * storing (uint8_t)time_second in the related pid domain on-reap and then
 * using that to skip-over the domain on-allocate.
 *
 * This array has to be fairly large to support a high fork/exec rate.
 * A ~100,000 entry array will support a 10-second reuse latency at
 * 10,000 execs/second, worst case.  Best-case multiply by PIDDOM_COUNT
 * (approximately 100,000 execs/second).
 *
 * Currently we allocate around a megabyte, making the worst-case fork
 * rate around 100,000/second.
 */
static uint8_t *pid_doms;

/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

/*
 * Refresh a cached ucred.  If the cached pointer no longer matches
 * p->p_ucred, drop the old reference and acquire a new reference to the
 * process's current ucred under p->p_spin.
 */
static __inline
struct ucred *
pcredcache(struct ucred *cr, struct proc *p)
{
	if (cr != p->p_ucred) {
		if (cr)
			crfree(cr);
		spin_lock(&p->p_spin);
		if ((cr = p->p_ucred) != NULL)
			crhold(cr);
		spin_unlock(&p->p_spin);
	}
	return cr;
}

/*
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
	    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

/*
 * Initialize global process hashing structures.
 *
 * These functions are ONLY called from the low level boot code and do
 * not lock their operations.
 */
void
procinit(void)
{
	u_long i;

	/*
	 * Allocate dynamically.  This array can be large (~1MB) so don't
	 * waste boot loader space.
	 */
	pid_doms = kmalloc(sizeof(pid_doms[0]) * PIDSEL_DOMAINS,
			   M_PROC, M_WAITOK | M_ZERO);

	/*
	 * Avoid unnecessary stalls due to pid_doms[] values all being
	 * the same.  Make sure that the allocation of pid 1 and pid 2
	 * succeeds.
	 */
	for (i = 0; i < PIDSEL_DOMAINS; ++i)
		pid_doms[i] = (int8_t)i - (int8_t)(PIDDOM_DELAY + 1);

	/*
	 * Other misc init.
	 */
	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		procglob_t *prg = &procglob[i];
		LIST_INIT(&prg->allproc);
		LIST_INIT(&prg->allsess);
		LIST_INIT(&prg->allpgrp);
		lwkt_token_init(&prg->proc_token, "allproc");
	}
	uihashinit();
}

void
procinsertinit(struct proc *p)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(p->p_pid)].allproc,
			 p, p_list);
}

void
pgrpinsertinit(struct pgrp *pg)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(pg->pg_id)].allpgrp,
			 pg, pg_list);
}

void
sessinsertinit(struct session *sess)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(sess->s_sid)].allsess,
			 sess, s_list);
}

/*
 * Process hold/release support functions.  Called via the PHOLD(),
 * PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
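 *
 * The PLOCK_* definitions below encode p_lock: the low bits (PLOCK_MASK)
 * form the hold count itself, PLOCK_WAITING indicates that a stalling
 * thread is sleeping on &p->p_lock, and PLOCK_ZOMB serializes zombie
 * reaping via pholdzomb()/prelezomb().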
 */
#define PLOCK_ZOMB	0x20000000
#define PLOCK_WAITING	0x40000000
#define PLOCK_MASK	0x1FFFFFFF

void
pstall(struct proc *p, const char *wmesg, int count)
{
	int o;
	int n;

	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_MASK) <= count)
			break;
		n = o | PLOCK_WAITING;
		tsleep_interlock(&p->p_lock, 0);

		/*
		 * If someone is trying to single-step the process during
		 * an exec or an exit they can deadlock us because procfs
		 * sleeps with the process held.
		 */
		if (p->p_stops) {
			if (p->p_flags & P_INEXEC) {
				wakeup(&p->p_stype);
			} else if (p->p_flags & P_POSTEXIT) {
				spin_lock(&p->p_spin);
				p->p_stops = 0;
				p->p_step = 0;
				spin_unlock(&p->p_spin);
				wakeup(&p->p_stype);
			}
		}

		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
		}
	}
}

void
phold(struct proc *p)
{
	atomic_add_int(&p->p_lock, 1);
}

/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prele(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 1, 0))
		return;

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~PLOCK_WAITING;
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that (p) may
 * not be valid in this case if the caller does not have some other
 * reference on (p).
 *
 * This function does not block on other PHOLD()s, only on other
 * PHOLDZOMB()s.
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
		return(0);

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_ZOMB) == 0) {
			n = (o + 1) | PLOCK_ZOMB;
			if (atomic_cmpset_int(&p->p_lock, o, n))
				return(0);
		} else {
			KKASSERT((o & PLOCK_MASK) > 0);
			n = o | PLOCK_WAITING;
			tsleep_interlock(&p->p_lock, 0);
			if (atomic_cmpset_int(&p->p_lock, o, n)) {
				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
				/* (p) can be ripped out at this point */
				return(1);
			}
		}
	}
}

/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prelezomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
		return;

	/*
	 * Slow path
	 */
	KKASSERT(p->p_lock & PLOCK_ZOMB);
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 */
int
inferior(struct proc *p)
{
	struct proc *p2;

	PHOLD(p);
	lwkt_gettoken_shared(&p->p_token);
	while (p != curproc) {
		if (p->p_pid == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);	/* drop the hold taken above */
			return (0);
		}
		p2 = p->p_pptr;
		PHOLD(p2);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		lwkt_gettoken_shared(&p2->p_token);
		p = p2;
	}
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (1);
}

/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}

/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The result will not be stable and is typically only used to validate
 * against a process that the caller has in-hand.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid)
		return (p);

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}

/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat != SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}

/*
 * Caller must hold the process token shared or exclusive.
 * The returned lwp, if not NULL, will be held.  Caller must
 * LWPRELE() it when done.
 */
struct lwp *
lwpfind(struct proc *p, lwpid_t tid)
{
	struct lwp *lp;

	lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, tid);
	if (lp)
		LWPHOLD(lp);
	return lp;
}

void
pgref(struct pgrp *pgrp)
{
	refcount_acquire(&pgrp->pg_refs);
}

void
pgrel(struct pgrp *pgrp)
{
	procglob_t *prg;
	int count;
	int n;

	n = PGRP_HASH(pgrp->pg_id);
	prg = &procglob[n];

	for (;;) {
		count = pgrp->pg_refs;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition, prg->proc_token is held.
	 */
	LIST_REMOVE(pgrp, pg_list);
	if (pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] = (uint8_t)time_second;

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	}
	lwkt_reltoken(&prg->proc_token);

	sess_rele(pgrp->pg_session);
	kfree(pgrp, M_PGRP);
}

/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;
	procglob_t *prg;
	int n;

	n = PGRP_HASH(pgid);
	prg = &procglob[n];
	lwkt_gettoken_shared(&prg->proc_token);

	LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
		if (pgrp->pg_id == pgid) {
			refcount_acquire(&pgrp->pg_refs);
			lwkt_reltoken(&prg->proc_token);
			return (pgrp);
		}
	}
	lwkt_reltoken(&prg->proc_token);
	return (NULL);
}

/*
 * Move p to a new or existing process group (and session)
 *
 * No requirements.
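 *
 * If no pgrp with the requested pgid exists a new one is created (and,
 * when mksess is non-zero, a new session with p as its leader).  Otherwise
 * p simply joins the existing group, which pgfind() returns referenced.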
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *opgrp;
	int error;

	pgrp = pgfind(pgid);

	KASSERT(pgrp == NULL || !mksess,
		("enterpgrp: setsid into non-empty pgrp"));
	KASSERT(!SESS_LEADER(p),
		("enterpgrp: session leader attempted setpgrp"));

	if (pgrp == NULL) {
		pid_t savepid = p->p_pid;
		struct proc *np;
		procglob_t *prg;
		int n;

		/*
		 * new process group
		 */
		KASSERT(p->p_pid == pgid,
			("enterpgrp: new pgrp and pid != pgid"));
		pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_jobc = 0;
		SLIST_INIT(&pgrp->pg_sigiolst);
		lwkt_token_init(&pgrp->pg_token, "pgrp_token");
		refcount_init(&pgrp->pg_refs, 1);
		lockinit(&pgrp->pg_lock, "pgwt", 0, 0);

		n = PGRP_HASH(pgid);
		prg = &procglob[n];
		lwkt_gettoken(&prg->proc_token);

		if ((np = pfindn(savepid)) == NULL || np != p) {
			lwkt_reltoken(&prg->proc_token);
			error = ESRCH;
			kfree(pgrp, M_PGRP);
			goto fatal;
		}

		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = kmalloc(sizeof(struct session), M_SESSION,
				       M_WAITOK | M_ZERO);
			lwkt_gettoken(&p->p_token);
			sess->s_prg = prg;
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			bcopy(p->p_session->s_login, sess->s_login,
			      sizeof(sess->s_login));
			pgrp->pg_session = sess;
			KASSERT(p == curproc,
				("enterpgrp: mksession and p != curproc"));
			p->p_flags &= ~P_CONTROLT;
			LIST_INSERT_HEAD(&prg->allsess, sess, s_list);
			lwkt_reltoken(&p->p_token);
		} else {
			lwkt_gettoken(&p->p_token);
			pgrp->pg_session = p->p_session;
			sess_hold(pgrp->pg_session);
			lwkt_reltoken(&p->p_token);
		}
		LIST_INSERT_HEAD(&prg->allpgrp, pgrp, pg_list);

		lwkt_reltoken(&prg->proc_token);
	} else if (pgrp == p->p_pgrp) {
		pgrel(pgrp);
		goto done;
	} /* else pgfind() referenced the pgrp */

	lwkt_gettoken(&pgrp->pg_token);
	lwkt_gettoken(&p->p_token);

	/*
	 * Replace p->p_pgrp, handling any races that occur.
	 */
	while ((opgrp = p->p_pgrp) != NULL) {
		pgref(opgrp);
		lwkt_gettoken(&opgrp->pg_token);
		if (opgrp != p->p_pgrp) {
			lwkt_reltoken(&opgrp->pg_token);
			pgrel(opgrp);
			continue;
		}
		LIST_REMOVE(p, p_pglist);
		break;
	}
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	if (opgrp) {
		fixjobc(p, opgrp, 0);
		lwkt_reltoken(&opgrp->pg_token);
		pgrel(opgrp);	/* manual pgref */
		pgrel(opgrp);	/* p->p_pgrp ref */
	}
	lwkt_reltoken(&p->p_token);
	lwkt_reltoken(&pgrp->pg_token);
done:
	error = 0;
fatal:
	return (error);
}

/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
	struct pgrp *pg = p->p_pgrp;

	lwkt_gettoken(&p->p_token);
	while ((pg = p->p_pgrp) != NULL) {
		pgref(pg);
		lwkt_gettoken(&pg->pg_token);
		if (p->p_pgrp != pg) {
			lwkt_reltoken(&pg->pg_token);
			pgrel(pg);
			continue;
		}
		p->p_pgrp = NULL;
		LIST_REMOVE(p, p_pglist);
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);	/* manual pgref */
		pgrel(pg);	/* p->p_pgrp ref */
		break;
	}
	lwkt_reltoken(&p->p_token);

	return (0);
}

/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
	atomic_add_int(&sp->s_count, 1);
}

/*
 * No requirements.
 */
void
sess_rele(struct session *sess)
{
	procglob_t *prg;
	struct tty *tp;
	int count;
	int n;

	n = SESS_HASH(sess->s_sid);
	prg = &procglob[n];

	for (;;) {
		count = sess->s_count;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&sess->s_count, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&sess->s_count, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition and prg->proc_token is held.
	 */
	LIST_REMOVE(sess, s_list);
	if (pid_doms[sess->s_sid % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[sess->s_sid % PIDSEL_DOMAINS] = (uint8_t)time_second;

	if (sess->s_ttyp && sess->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
		/* FULL CLOSE, see ttyclearsession() */
		KKASSERT(sess->s_ttyp->t_session == sess);
		sess->s_ttyp->t_session = NULL;
#else
		/* HALF CLOSE, see ttyclearsession() */
		if (sess->s_ttyp->t_session == sess)
			sess->s_ttyp->t_session = NULL;
#endif
	}
	if ((tp = sess->s_ttyp) != NULL) {
		sess->s_ttyp = NULL;
		ttyunhold(tp);
	}
	lwkt_reltoken(&prg->proc_token);

	kfree(sess, M_SESSION);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 *
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *np;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	lwkt_gettoken(&p->p_token);	/* p_children scan */
	lwkt_gettoken(&pgrp->pg_token);

	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(np, &p->p_children, p_sibling) {
		PHOLD(np);
		lwkt_gettoken(&np->p_token);
		if ((hispgrp = np->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    np->p_stat != SZOMB) {
			pgref(hispgrp);
			lwkt_gettoken(&hispgrp->pg_token);
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
			lwkt_reltoken(&hispgrp->pg_token);
			pgrel(hispgrp);
		}
		lwkt_reltoken(&np->p_token);
		PRELE(np);
	}
	KKASSERT(pgrp->pg_refs > 0);
	lwkt_reltoken(&pgrp->pg_token);
	lwkt_reltoken(&p->p_token);
}

/*
 * A process group has become orphaned; if there are any stopped processes
 * in the group, hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				ksignal(p, SIGHUP);
				ksignal(p, SIGCONT);
			}
			return;
		}
	}
}

/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
	int random_offset;

	if ((random_offset = randompid) != 0) {
		read_random(&random_offset, sizeof(random_offset));
		random_offset = (random_offset & 0x7FFFFFFF) % randompid;
	}
	proc_makepid(p, random_offset);
}

/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * p_pid is assigned and the process is added to the allproc hash table.
 *
 * WARNING! We need to allocate PIDs sequentially during early boot.
 *	    In particular, init needs to have a pid of 1.
 */
static
void
proc_makepid(struct proc *p, int random_offset)
{
	static pid_t nextpid = 1;	/* heuristic, allowed to race */
	procglob_t *prg;
	struct pgrp *pg;
	struct proc *ps;
	struct session *sess;
	pid_t base;
	int8_t delta8;
	int retries;
	int n;

	/*
	 * Select the next pid base candidate.
	 *
	 * Check for wrap-around, and do not allow a pid < 100.
	 */
	retries = 0;
retry:
	base = atomic_fetchadd_int(&nextpid, 1) + random_offset;
	if (base <= 0 || base >= PID_MAX) {
		base = base % PID_MAX;
		if (base < 0)
			base = 100;
		if (base < 100)
			base += 100;
		nextpid = base;		/* reset (SMP race ok) */
	}

	/*
	 * Do not allow a base pid to be selected from a domain that has
	 * recently seen a pid/pgid/sessid reap.  Sleep a little if we looped
	 * through all available domains.
	 *
	 * WARNING: We want the early pids to be allocated linearly,
	 *	    particularly pid 1 and pid 2.
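	 *
	 * The check works on (uint8_t)time_second stamps: pid_doms[] holds
	 * the low 8 bits of the reap time and delta8 below is the signed
	 * 8-bit difference from the current time.  For example, a domain
	 * reaped 3 seconds ago yields delta8 == 3, which falls inside
	 * [0, PIDDOM_DELAY] and causes the domain to be skipped; once more
	 * than PIDDOM_DELAY seconds have passed the domain becomes usable
	 * again.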
	 */
	if (++retries >= PIDSEL_DOMAINS)
		tsleep(&nextpid, 0, "makepid", 1);
	if (base >= 100) {
		delta8 = (int8_t)time_second -
			 (int8_t)pid_doms[base % PIDSEL_DOMAINS];
		if (delta8 >= 0 && delta8 <= PIDDOM_DELAY) {
			++pid_domain_skips;
			goto retry;
		}
	}

	/*
	 * Calculate a hash index and find an unused process id within
	 * the table, looping if we cannot find one.
	 *
	 * The inner loop increments by ALLPROC_HSIZE which keeps the
	 * PID at the same pid_doms[] index as well as the same hash index.
	 */
	n = ALLPROC_HASH(base);
	prg = &procglob[n];
	lwkt_gettoken(&prg->proc_token);

restart1:
	LIST_FOREACH(ps, &prg->allproc, p_list) {
		if (ps->p_pid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(pg, &prg->allpgrp, pg_list) {
		if (pg->pg_id == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(sess, &prg->allsess, s_list) {
		if (sess->s_sid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}

	/*
	 * Assign the pid and insert the process.
	 */
	p->p_pid = base;
	LIST_INSERT_HEAD(&prg->allproc, p, p_list);
	lwkt_reltoken(&prg->proc_token);
}

/*
 * Called from exit1 to place the process into a zombie state.
 * The process is removed from the pid hash and p_stat is set
 * to SZOMB.  Normal pfind[n]() calls will not find it any more.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];
	PSTALL(p, "reap1", 0);
	lwkt_gettoken(&prg->proc_token);

	PSTALL(p, "reap1a", 0);
	p->p_stat = SZOMB;

	lwkt_reltoken(&prg->proc_token);
	dsched_exit_proc(p);
}

/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes one before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 *
 * Assumes caller has one ref.
 */
void
proc_remove_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];

	PSTALL(p, "reap2", 1);
	lwkt_gettoken(&prg->proc_token);
	PSTALL(p, "reap2a", 1);
	LIST_REMOVE(p, p_list);		/* remove from master list */
	LIST_REMOVE(p, p_sibling);	/* and from sibling list */
	p->p_pptr = NULL;
	p->p_ppid = 0;
	if (pid_doms[p->p_pid % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[p->p_pid % PIDSEL_DOMAINS] = (uint8_t)time_second;
	lwkt_reltoken(&prg->proc_token);
}

/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;

	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
		allocvnode_gc();
	}
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0, NULL);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}
}

/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 *	nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
			allocvnode_gc();
		}
	}
}

void
proc_usermap(struct proc *p, int invfork)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	upmap = kmalloc(roundup2(sizeof(*upmap), PAGE_SIZE), M_UPMAP,
			M_WAITOK | M_ZERO);
	if (p->p_upmap == NULL && (p->p_flags & P_POSTEXIT) == 0) {
		upmap->header[0].type = UKPTYPE_VERSION;
		upmap->header[0].offset = offsetof(struct sys_upmap, version);
		upmap->header[1].type = UPTYPE_RUNTICKS;
		upmap->header[1].offset = offsetof(struct sys_upmap, runticks);
		upmap->header[2].type = UPTYPE_FORKID;
		upmap->header[2].offset = offsetof(struct sys_upmap, forkid);
		upmap->header[3].type = UPTYPE_PID;
		upmap->header[3].offset = offsetof(struct sys_upmap, pid);
		upmap->header[4].type = UPTYPE_PROC_TITLE;
		upmap->header[4].offset = offsetof(struct sys_upmap, proc_title);
		upmap->header[5].type = UPTYPE_INVFORK;
		upmap->header[5].offset = offsetof(struct sys_upmap, invfork);

		upmap->version = UPMAP_VERSION;
		upmap->pid = p->p_pid;
		upmap->forkid = p->p_forkid;
		upmap->invfork = invfork;
		p->p_upmap = upmap;
	} else {
		kfree(upmap, M_UPMAP);
	}
	lwkt_reltoken(&p->p_token);
}

void
proc_userunmap(struct proc *p)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	if ((upmap = p->p_upmap) != NULL) {
		p->p_upmap = NULL;
		kfree(upmap, M_UPMAP);
	}
	lwkt_reltoken(&p->p_token);
}

/*
 * Called when the per-thread user/kernel shared page needs to be
 * allocated.  The function refuses to allocate the page if the
 * thread is exiting to avoid races against lwp_userunmap().
 */
void
lwp_usermap(struct lwp *lp, int invfork)
{
	struct sys_lpmap *lpmap;

	lwkt_gettoken(&lp->lwp_token);

	lpmap = kmalloc(roundup2(sizeof(*lpmap), PAGE_SIZE), M_UPMAP,
			M_WAITOK | M_ZERO);
	if (lp->lwp_lpmap == NULL && (lp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
		lpmap->header[0].type = UKPTYPE_VERSION;
		lpmap->header[0].offset = offsetof(struct sys_lpmap, version);
		lpmap->header[1].type = LPTYPE_BLOCKALLSIGS;
		lpmap->header[1].offset = offsetof(struct sys_lpmap,
						   blockallsigs);
		lpmap->header[2].type = LPTYPE_THREAD_TITLE;
		lpmap->header[2].offset = offsetof(struct sys_lpmap,
						   thread_title);
		lpmap->header[3].type = LPTYPE_THREAD_TID;
		lpmap->header[3].offset = offsetof(struct sys_lpmap, tid);

		lpmap->version = LPMAP_VERSION;
		lpmap->tid = lp->lwp_tid;
		lp->lwp_lpmap = lpmap;
	} else {
		kfree(lpmap, M_UPMAP);
	}
	lwkt_reltoken(&lp->lwp_token);
}

/*
 * Called when a LWP (but not necessarily the whole process) exits.
 * Called when a process execs (after all other threads have been killed).
 *
 * lwp-specific mappings must be removed.  If userland didn't do it, then
 * we have to.  Otherwise we could end up disclosing kernel memory due to
 * the ad-hoc pmap mapping.
 */
void
lwp_userunmap(struct lwp *lp)
{
	struct sys_lpmap *lpmap;
	struct vm_map *map;
	struct vm_map_backing *ba;
	struct vm_map_backing copy;

	lwkt_gettoken(&lp->lwp_token);
	map = &lp->lwp_proc->p_vmspace->vm_map;
	lpmap = lp->lwp_lpmap;
	lp->lwp_lpmap = NULL;

	spin_lock(&lp->lwp_spin);
	while ((ba = TAILQ_FIRST(&lp->lwp_lpmap_backing_list)) != NULL) {
		copy = *ba;
		spin_unlock(&lp->lwp_spin);

		lwkt_gettoken(&map->token);
		vm_map_remove(map, copy.start, copy.end);
		lwkt_reltoken(&map->token);

		spin_lock(&lp->lwp_spin);
	}
	spin_unlock(&lp->lwp_spin);

	if (lpmap)
		kfree(lpmap, M_UPMAP);
	lwkt_reltoken(&lp->lwp_token);
}

/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 * Zombie procs are skipped.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data, int segmented)
{
	int limit = nprocs + ncpus;
	struct proc *p;
	int ns;
	int ne;
	int r;
	int n;

	if (segmented) {
		int id = mycpu->gd_cpuid;
		ns = id * ALLPROC_HSIZE / ncpus;
		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
	} else {
		ns = 0;
		ne = ALLPROC_HSIZE;
	}

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
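	 *
	 * In segmented mode each cpu is given its own slice of the
	 * ALLPROC_HSIZE hash buckets (ns..ne above), so concurrent callers
	 * running on different cpus walk disjoint hash chains.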
	 */
	for (n = ns; n < ne; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
			if (--limit < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}

/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data, int segmented)
{
	struct proc *p;
	struct lwp *lp;
	int ns;
	int ne;
	int r = 0;
	int n;

	if (segmented) {
		int id = mycpu->gd_cpuid;
		ns = id * ALLPROC_HSIZE / ncpus;
		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
	} else {
		ns = 0;
		ne = ALLPROC_HSIZE;
	}

	for (n = ns; n < ne; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			FOREACH_LWP_IN_PROC(lp, p) {
				LWPHOLD(lp);
				r = callback(lp, data);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Asked to exit early
		 */
		if (p)
			break;
	}
}

/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_token held.
 *
 * No requirements.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;
	int n;

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat != SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	struct pgrp *pgrp;
	struct proc *p;
	procglob_t *prg;
	int i;

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		prg = &procglob[i];

		if (LIST_EMPTY(&prg->allpgrp))
			continue;
		kprintf("\tindx %d\n", i);
		LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
			kprintf("\tpgrp %p, pgid %ld, sess %p, "
				"sesscnt %d, mem %p\n",
				(void *)pgrp, (long)pgrp->pg_id,
				(void *)pgrp->pg_session,
				pgrp->pg_session->s_count,
				(void *)LIST_FIRST(&pgrp->pg_members));
			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
				kprintf("\t\tpid %ld addr %p pgrp %p\n",
					(long)p->p_pid, (void *)p,
					(void *)p->p_pgrp);
			}
		}
	}
}
#endif /* DDB */

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	struct lwp *lp;
	int skp = 0, had_output = 0;
	int error;

	bzero(&ki, sizeof(ki));
	lwkt_gettoken_shared(&p->p_token);
	fill_kinfo_proc(p, &ki);
	if ((flags & KERN_PROC_FLAG_LWP) == 0)
		skp = 1;
	error = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		fill_kinfo_lwp(lp, &ki.kp_lwp);
		had_output = 1;
		if (skp == 0) {
			error = SYSCTL_OUT(req, &ki, sizeof(ki));
			bzero(&ki.kp_lwp, sizeof(ki.kp_lwp));
		}
		LWPRELE(lp);
		if (error)
			break;
	}
	lwkt_reltoken(&p->p_token);

	/*
	 * If aggregating threads, set the tid field to -1.
	 */
	if (skp)
		ki.kp_lwp.kl_tid = -1;

	/*
	 * We need to output at least the proc, even if there is no lwp.
	 * If skp is non-zero we aggregated the lwps and need to output
	 * the result.
	 */
	if (had_output == 0 || skp) {
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
	}
	return (error);
}

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
{
	struct kinfo_proc ki;
	int error;

	fill_kinfo_proc_kthread(td, &ki);
	error = SYSCTL_OUT(req, &ki, sizeof(ki));
	if (error)
		return error;
	return(0);
}

/*
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int oid = oidp->oid_number;
	u_int namelen = arg2;
	struct proc *p;
	struct thread *td;
	struct thread *marker;
	int flags = 0;
	int error = 0;
	int n;
	int origcpu;
	struct ucred *cr1 = curproc->p_ucred;
	struct ucred *crcache = NULL;

	flags = oid & KERN_PROC_FLAGMASK;
	oid &= ~KERN_PROC_FLAGMASK;

	if ((oid == KERN_PROC_ALL && namelen != 0) ||
	    (oid != KERN_PROC_ALL && namelen != 1)) {
		return (EINVAL);
	}

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	if (oid == KERN_PROC_PID) {
		p = pfind((pid_t)name[0]);
		if (p) {
			crcache = pcredcache(crcache, p);
			if (PRISON_CHECK(cr1, crcache))
				error = sysctl_out_proc(p, req, flags);
			PRELE(p);
		}
		goto post_threads;
	}
	p = NULL;

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			goto post_threads;
	}

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_EMPTY(&prg->allproc))
			continue;
		lwkt_gettoken_shared(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			/*
			 * Show a user only their processes.
			 */
			if (ps_showallprocs == 0) {
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    p_trespass(cr1, crcache)) {
					continue;
				}
			}

			/*
			 * Skip embryonic processes.
			 */
			if (p->p_stat == SIDL)
				continue;

			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid) {
			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0])
					continue;
				break;

			case KERN_PROC_TTY:
				if ((p->p_flags & P_CONTROLT) == 0 ||
				    p->p_session == NULL ||
				    p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
				     (udev_t)name[0])
					continue;
				break;

			case KERN_PROC_UID:
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    crcache->cr_uid != (uid_t)name[0]) {
					continue;
				}
				break;

			case KERN_PROC_RUID:
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    crcache->cr_ruid != (uid_t)name[0]) {
					continue;
				}
				break;
			}

			crcache = pcredcache(crcache, p);
			if (!PRISON_CHECK(cr1, crcache))
				continue;
			PHOLD(p);
			error = sysctl_out_proc(p, req, flags);
			PRELE(p);
			if (error) {
				lwkt_reltoken(&prg->proc_token);
				goto post_threads;
			}
		}
		lwkt_reltoken(&prg->proc_token);
	}

	/*
	 * Iterate over all active cpus and scan their thread list.  Start
	 * with the next logical cpu and end with our original cpu.  We
	 * migrate our own thread to each target cpu in order to safely scan
	 * its thread list.  In the last loop we migrate back to our original
	 * cpu.
	 */
	origcpu = mycpu->gd_cpuid;
	if (!ps_showallthreads || jailed(cr1))
		goto post_threads;

	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
	marker->td_flags = TDF_MARKER;
	error = 0;

	for (n = 1; n <= ncpus; ++n) {
		globaldata_t rgd;
		int nid;

		nid = (origcpu + n) % ncpus;
		if (CPUMASK_TESTBIT(smp_active_mask, nid) == 0)
			continue;
		rgd = globaldata_find(nid);
		lwkt_setcpu_self(rgd);

		crit_enter();
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
			TAILQ_INSERT_BEFORE(td, marker, td_allq);
			if (td->td_flags & TDF_MARKER)
				continue;
			if (td->td_proc)
				continue;

			lwkt_hold(td);
			crit_exit();

			switch (oid) {
			case KERN_PROC_PGRP:
			case KERN_PROC_TTY:
			case KERN_PROC_UID:
			case KERN_PROC_RUID:
				break;
			default:
				error = sysctl_out_proc_kthread(td, req);
				break;
			}
			lwkt_rele(td);
			crit_enter();
			if (error)
				break;
		}
		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
		crit_exit();

		if (error)
			break;
	}

	/*
	 * Userland scheduler expects us to return on the same cpu we
	 * started on.
	 */
	if (mycpu->gd_cpuid != origcpu)
		lwkt_setcpu_self(globaldata_find(origcpu));

	kfree(marker, M_TEMP);

post_threads:
	if (crcache)
		crfree(crcache);
	return (error);
}

/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	size_t n;
	struct proc *p;
	struct lwp *lp;
#if 0
	struct pargs *opa;
#endif
	struct pargs *pa;
	int error = 0;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1 && namelen != 2)
		return (EINVAL);

	lp = NULL;
	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	if (namelen == 2) {
		lp = lwpfind(p, (lwpid_t)name[1]);
		if (lp)
			lwkt_gettoken(&lp->lwp_token);
	} else {
		lp = NULL;
	}

	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->newptr && curproc != p) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr) {
		if (lp && lp->lwp_lpmap != NULL &&
		    lp->lwp_lpmap->thread_title[0]) {
			/*
			 * Args set via writable user thread mmap or
			 * sysctl().
			 *
			 * We must calculate the string length manually
			 * because the user data can change at any time.
			 */
			size_t n;
			char *base;

			base = lp->lwp_lpmap->thread_title;
			for (n = 0; n < LPMAP_MAXTHREADTITLE - 1; ++n) {
				if (base[n] == 0)
					break;
			}
			error = SYSCTL_OUT(req, base, n);
			if (error == 0)
				error = SYSCTL_OUT(req, "", 1);
		} else if (p->p_upmap != NULL && p->p_upmap->proc_title[0]) {
			/*
			 * Args set via writable user process mmap or
			 * sysctl().
			 *
			 * We must calculate the string length manually
			 * because the user data can change at any time.
			 */
			size_t n;
			char *base;

			base = p->p_upmap->proc_title;
			for (n = 0; n < UPMAP_MAXPROCTITLE - 1; ++n) {
				if (base[n] == 0)
					break;
			}
			error = SYSCTL_OUT(req, base, n);
			if (error == 0)
				error = SYSCTL_OUT(req, "", 1);
		} else if ((pa = p->p_args) != NULL) {
			/*
			 * Default/original arguments.
			 */
			refcount_acquire(&pa->ar_ref);
			error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
			if (refcount_release(&pa->ar_ref))
				kfree(pa, M_PARGS);
		}
	}
	if (req->newptr == NULL)
		goto done;

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
		goto done;
	}

	/*
	 * Get the new process or thread title from userland
	 */
	pa = kmalloc(sizeof(struct pargs) + req->newlen,
		     M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = req->newlen;
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (error) {
		kfree(pa, M_PARGS);
		goto done;
	}

	if (lp) {
		/*
		 * Update thread title
		 */
		if (lp->lwp_lpmap == NULL)
			lwp_usermap(lp, -1);
		if (lp->lwp_lpmap) {
			n = req->newlen;
			if (n >= sizeof(lp->lwp_lpmap->thread_title))
				n = sizeof(lp->lwp_lpmap->thread_title) - 1;
			lp->lwp_lpmap->thread_title[n] = 0;
			bcopy(pa->ar_args, lp->lwp_lpmap->thread_title, n);
		}
	} else {
		/*
		 * Update process title
		 */
		if (p->p_upmap == NULL)
			proc_usermap(p, -1);
		if (p->p_upmap) {
			n = req->newlen;
			if (n >= sizeof(p->p_upmap->proc_title))
				n = sizeof(p->p_upmap->proc_title) - 1;
			p->p_upmap->proc_title[n] = 0;
			bcopy(pa->ar_args, p->p_upmap->proc_title, n);
		}

#if 0
		/*
		 * XXX delete this code, keep original args intact for
		 * the setproctitle("") case.
		 * Scrap p->p_args, p->p_upmap->proc_title[] overrides it.
		 */
		opa = p->p_args;
		p->p_args = NULL;
		if (opa) {
			KKASSERT(opa->ar_ref > 0);
			if (refcount_release(&opa->ar_ref)) {
				kfree(opa, M_PARGS);
				/* opa = NULL; */
			}
		}
#endif
	}
	kfree(pa, M_PARGS);

done:
	if (lp) {
		lwkt_reltoken(&lp->lwp_token);
		LWPRELE(lp);
	}
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int error = 0;
	char *fullpath, *freepath;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken_shared(&p->p_token);

	/*
	 * If we are not allowed to see other args, we certainly shouldn't
	 * get the cwd either.  Also check the usual trespassing.
	 */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
		struct nchandle nch;

		cache_copy(&p->p_fd->fd_ncdir, &nch);
		error = cache_fullpath(p, &nch, NULL,
				       &fullpath, &freepath, 0);
		cache_drop(&nch);
		if (error)
			goto done;
		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
		kfree(freepath, M_TEMP);
	}

done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	char *retbuf, *freebuf;
	int error = 0;
	struct nchandle nch;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = curproc;
	} else {
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
	}

	cache_copy(&p->p_textnch, &nch);
	error = cache_fullpath(p, &nch, NULL, &retbuf, &freebuf, 0);
	cache_drop(&nch);
	if (error)
		goto done;
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	kfree(freebuf, M_TEMP);
done:
	if (*pidp != -1)
		PRELE(p);

	return (error);
}

static int
sysctl_kern_proc_sigtramp(SYSCTL_HANDLER_ARGS)
{
	/*int *name = (int *)arg1;*/
	u_int namelen = arg2;
	struct kinfo_sigtramp kst;
	const struct sysentvec *sv;
	int error;

	if (namelen > 1)
		return (EINVAL);
	/* ignore pid if passed in (freebsd compatibility) */

	sv = curproc->p_sysent;
	bzero(&kst, sizeof(kst));
	if (sv->sv_szsigcode) {
		intptr_t sigbase;

		sigbase = trunc_page64((intptr_t)PS_STRINGS -
				       *sv->sv_szsigcode);
		sigbase -= SZSIGCODE_EXTRA_BYTES;

		kst.ksigtramp_start = (void *)sigbase;
		kst.ksigtramp_end = (void *)(sigbase + *sv->sv_szsigcode);
	}
	error = SYSCTL_OUT(req, &kst, sizeof(kst));

	return (error);
}

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all,
	CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
	CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK,
	sysctl_kern_proc_args, "Process argument list");

SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd,
	CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK,
	sysctl_kern_proc_cwd, "Process current working directory");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc_pathname, "Process executable path");

SYSCTL_PROC(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp,
	CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK,
	0, 0, sysctl_kern_proc_sigtramp, "S,sigtramp",
	"Return sigtramp address range");