/*
 * (MPSAFE)
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 * $FreeBSD: src/sys/kern/kern_proc.c,v 1.63.2.9 2003/05/08 07:47:16 kbyanc Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
	   &ps_showallprocs, 0,
	   "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
	   &ps_showallthreads, 0,
	   "Unprivileged processes can see kernel threads");

static void pgdelete(struct pgrp *);
static void orphanpg(struct pgrp *pg);
static pid_t proc_getnewpid_locked(int random_offset);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;

/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;
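
/*
 * Illustrative note (added commentary, not part of the original sources):
 * the modulus above is tuned from userland through the kern.randompid
 * sysctl registered below.  A rough sketch of the effect, assuming a
 * modulus of 10000:
 *
 *	# sysctl kern.randompid=10000
 *	kern.randompid: 0 -> 10000
 *
 * proc_add_allproc() then adds karc4random() % 10000 on top of the usual
 * nextpid + 1, so consecutive forks no longer yield consecutive pids.
 */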

/*
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
	    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

/*
 * Initialize global process hashing structures.
 *
 * Called from the low level boot code only.
 */
void
procinit(void)
{
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	lwkt_init();
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	uihashinit();
}

/*
 * Process hold/release support functions.  These functions must be MPSAFE.
 * Called via the PHOLD(), PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB	0x20000000
#define PLOCK_WAITING	0x40000000
#define PLOCK_MASK	0x1FFFFFFF

void
pstall(struct proc *p, const char *wmesg, int count)
{
	int o;
	int n;

	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_MASK) <= count)
			break;
		n = o | PLOCK_WAITING;
		tsleep_interlock(&p->p_lock, 0);

		/*
		 * If someone is trying to single-step the process during
		 * an exec or an exit they can deadlock us because procfs
		 * sleeps with the process held.
		 */
		if (p->p_stops) {
			if (p->p_flags & P_INEXEC) {
				wakeup(&p->p_stype);
			} else if (p->p_flags & P_POSTEXIT) {
				spin_lock(&p->p_spin);
				p->p_stops = 0;
				p->p_step = 0;
				spin_unlock(&p->p_spin);
				wakeup(&p->p_stype);
			}
		}

		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
		}
	}
}

void
phold(struct proc *p)
{
	atomic_add_int(&p->p_lock, 1);
}
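
/*
 * Illustrative sketch (added commentary, not original code): the typical
 * short-term hold pattern built on the functions above.  PHOLD() bumps
 * p->p_lock so the proc cannot be reaped out from under the caller, and
 * PRELE() drops the hold again:
 *
 *	PHOLD(p);
 *	... safely inspect or adjust *p ...
 *	PRELE(p);		(p may become invalid once released)
 *
 * allproc_scan() further down in this file uses exactly this pattern
 * around its callback.
 */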

/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prele(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 1, 0))
		return;

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~PLOCK_WAITING;
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that we must block
 * without holding a ref, meaning that the caller must ensure that (p)
 * remains valid through some other interlock (typically on its parent
 * process's p_token).
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
		return(0);

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_ZOMB) == 0) {
			n = (o + 1) | PLOCK_ZOMB;
			if (atomic_cmpset_int(&p->p_lock, o, n))
				return(0);
		} else {
			KKASSERT((o & PLOCK_MASK) > 0);
			n = o | PLOCK_WAITING;
			tsleep_interlock(&p->p_lock, 0);
			if (atomic_cmpset_int(&p->p_lock, o, n)) {
				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
				/* (p) can be ripped out at this point */
				return(1);
			}
		}
	}
}

/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prelezomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
		return;

	/*
	 * Slow path
	 */
	KKASSERT(p->p_lock & PLOCK_ZOMB);
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 * The caller must hold proc_token if the caller wishes a stable result.
 */
int
inferior(struct proc *p)
{
	lwkt_gettoken(&proc_token);
	while (p != curproc) {
		if (p->p_pid == 0) {
			lwkt_reltoken(&proc_token);
			return (0);
		}
		p = p->p_pptr;
	}
	lwkt_reltoken(&proc_token);
	return (1);
}

/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
	struct proc *p;

	lwkt_gettoken(&proc_token);
	LIST_FOREACH(p, PIDHASH(pid), p_hash) {
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&proc_token);
	return (NULL);
}
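
/*
 * Illustrative usage sketch (added commentary, not original code): pfind()
 * hands back the proc already held, so the lookup is paired with PRELE()
 * once the caller is done with it:
 *
 *	struct proc *p;
 *
 *	if ((p = pfind(pid)) != NULL) {
 *		... p is held and cannot be reaped here ...
 *		PRELE(p);
 *	}
 */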

/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The caller should hold proc_token if the caller wishes a stable result.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
	struct proc *p;

	lwkt_gettoken(&proc_token);
	LIST_FOREACH(p, PIDHASH(pid), p_hash) {
		if (p->p_pid == pid) {
			lwkt_reltoken(&proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&proc_token);
	return (NULL);
}

void
pgref(struct pgrp *pgrp)
{
	refcount_acquire(&pgrp->pg_refs);
}

void
pgrel(struct pgrp *pgrp)
{
	if (refcount_release(&pgrp->pg_refs))
		pgdelete(pgrp);
}

/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;

	lwkt_gettoken(&proc_token);
	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
		if (pgrp->pg_id == pgid) {
			refcount_acquire(&pgrp->pg_refs);
			lwkt_reltoken(&proc_token);
			return (pgrp);
		}
	}
	lwkt_reltoken(&proc_token);
	return (NULL);
}
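
/*
 * Illustrative usage sketch (added commentary, not original code): the
 * reference returned by pgfind() is balanced with pgrel(), which frees the
 * process group via pgdelete() on the final release:
 *
 *	struct pgrp *pg;
 *
 *	if ((pg = pgfind(pgid)) != NULL) {
 *		... use pg ...
 *		pgrel(pg);
 *	}
 */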

/*
 * Move p to a new or existing process group (and session)
 *
 * No requirements.
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *opgrp;
	int error;

	pgrp = pgfind(pgid);

	KASSERT(pgrp == NULL || !mksess,
		("enterpgrp: setsid into non-empty pgrp"));
	KASSERT(!SESS_LEADER(p),
		("enterpgrp: session leader attempted setpgrp"));

	if (pgrp == NULL) {
		pid_t savepid = p->p_pid;
		struct proc *np;
		/*
		 * new process group
		 */
		KASSERT(p->p_pid == pgid,
			("enterpgrp: new pgrp and pid != pgid"));
		if ((np = pfindn(savepid)) == NULL || np != p) {
			error = ESRCH;
			goto fatal;
		}
		pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK);
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = kmalloc(sizeof(struct session), M_SESSION,
				       M_WAITOK);
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			bcopy(p->p_session->s_login, sess->s_login,
			      sizeof(sess->s_login));
			pgrp->pg_session = sess;
			KASSERT(p == curproc,
				("enterpgrp: mksession and p != curproc"));
			lwkt_gettoken(&p->p_token);
			p->p_flags &= ~P_CONTROLT;
			lwkt_reltoken(&p->p_token);
		} else {
			pgrp->pg_session = p->p_session;
			sess_hold(pgrp->pg_session);
		}
		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		pgrp->pg_jobc = 0;
		SLIST_INIT(&pgrp->pg_sigiolst);
		lwkt_token_init(&pgrp->pg_token, "pgrp_token");
		refcount_init(&pgrp->pg_refs, 1);
		lockinit(&pgrp->pg_lock, "pgwt", 0, 0);
	} else if (pgrp == p->p_pgrp) {
		pgrel(pgrp);
		goto done;
	} /* else pgfind() referenced the pgrp */

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	lwkt_gettoken(&pgrp->pg_token);
	lwkt_gettoken(&p->p_token);
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);
	while ((opgrp = p->p_pgrp) != NULL) {
		opgrp = p->p_pgrp;
		lwkt_gettoken(&opgrp->pg_token);
		LIST_REMOVE(p, p_pglist);
		p->p_pgrp = NULL;
		lwkt_reltoken(&opgrp->pg_token);
		pgrel(opgrp);
	}
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
	lwkt_reltoken(&p->p_token);
	lwkt_reltoken(&pgrp->pg_token);
done:
	error = 0;
fatal:
	return (error);
}

/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
	struct pgrp *pg = p->p_pgrp;

	lwkt_gettoken(&p->p_token);
	pg = p->p_pgrp;
	if (pg) {
		pgref(pg);
		lwkt_gettoken(&pg->pg_token);
		if (p->p_pgrp == pg) {
			p->p_pgrp = NULL;
			LIST_REMOVE(p, p_pglist);
			pgrel(pg);
		}
		lwkt_reltoken(&pg->pg_token);
		lwkt_reltoken(&p->p_token);	/* avoid chaining on rel */
		pgrel(pg);
	} else {
		lwkt_reltoken(&p->p_token);
	}
	return (0);
}

/*
 * Delete a process group.  Must be called only after the last ref has been
 * released.
 */
static void
pgdelete(struct pgrp *pgrp)
{
	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	LIST_REMOVE(pgrp, pg_hash);
	sess_rele(pgrp->pg_session);
	kfree(pgrp, M_PGRP);
}

/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
	lwkt_gettoken(&tty_token);
	++sp->s_count;
	lwkt_reltoken(&tty_token);
}

/*
 * No requirements.
 */
void
sess_rele(struct session *sp)
{
	struct tty *tp;

	KKASSERT(sp->s_count > 0);
	lwkt_gettoken(&tty_token);
	if (--sp->s_count == 0) {
		if (sp->s_ttyp && sp->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
			/* FULL CLOSE, see ttyclearsession() */
			KKASSERT(sp->s_ttyp->t_session == sp);
			sp->s_ttyp->t_session = NULL;
#else
			/* HALF CLOSE, see ttyclearsession() */
			if (sp->s_ttyp->t_session == sp)
				sp->s_ttyp->t_session = NULL;
#endif
		}
		if ((tp = sp->s_ttyp) != NULL) {
			sp->s_ttyp = NULL;
			ttyunhold(tp);
		}
		kfree(sp, M_SESSION);
	}
	lwkt_reltoken(&tty_token);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *np;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	lwkt_gettoken(&p->p_token);	/* p_children scan */
	lwkt_gettoken(&pgrp->pg_token);

	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(np, &p->p_children, p_sibling) {
		PHOLD(np);
		lwkt_gettoken(&np->p_token);
		if ((hispgrp = np->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    np->p_stat != SZOMB) {
			pgref(hispgrp);
			lwkt_gettoken(&hispgrp->pg_token);
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
			lwkt_reltoken(&hispgrp->pg_token);
			pgrel(hispgrp);
		}
		lwkt_reltoken(&np->p_token);
		PRELE(np);
	}
	KKASSERT(pgrp->pg_refs > 0);
	lwkt_reltoken(&pgrp->pg_token);
	lwkt_reltoken(&p->p_token);
}
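
/*
 * Worked example (added commentary, not from the original sources): a shell
 * S in pgrp A forks a pipeline whose members are placed in pgrp B, all in
 * the same session.  Every pipeline member has its parent (S) in a
 * different pgrp of that session, so each member qualifies B and pg_jobc
 * for B counts them.  When the last qualifying member exits or is moved,
 * pg_jobc drops to 0, B becomes orphaned, and orphanpg() below delivers
 * SIGHUP followed by SIGCONT to the members if any of them are stopped.
 */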

/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				ksignal(p, SIGHUP);
				ksignal(p, SIGCONT);
			}
			return;
		}
	}
}

/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
	int random_offset;

	if ((random_offset = randompid) != 0) {
		get_mplock();
		random_offset = karc4random() % random_offset;
		rel_mplock();
	}

	lwkt_gettoken(&proc_token);
	p->p_pid = proc_getnewpid_locked(random_offset);
	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(PIDHASH(p->p_pid), p, p_hash);
	lwkt_reltoken(&proc_token);
}

/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * The caller must hold proc_token.
 */
static
pid_t
proc_getnewpid_locked(int random_offset)
{
	static pid_t nextpid;
	static pid_t pidchecked;
	struct proc *p;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from nextpid+1 through pidchecked-1).
	 */
	nextpid = nextpid + 1 + random_offset;
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (nextpid >= PID_MAX) {
		nextpid = nextpid % PID_MAX;
		if (nextpid < 100)
			nextpid += 100;
		pidchecked = 0;
	}
	if (nextpid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;

		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than nextpid, so we can avoid checking for a while.
		 *
		 * NOTE: Processes in the midst of being forked may not yet
		 *	 have p_pgrp and p_pgrp->pg_session set up, so we
		 *	 have to check for NULL.
		 *
		 *	 Processes being torn down should be interlocked
		 *	 with proc_token prior to the clearing of their
		 *	 p_pgrp.
		 */
		p = LIST_FIRST(&allproc);
again:
		for (; p != NULL; p = LIST_NEXT(p, p_list)) {
			while (p->p_pid == nextpid ||
			    (p->p_pgrp && p->p_pgrp->pg_id == nextpid) ||
			    (p->p_pgrp && p->p_session &&
			     p->p_session->s_sid == nextpid)) {
				nextpid++;
				if (nextpid >= pidchecked)
					goto retry;
			}
			if (p->p_pid > nextpid && pidchecked > p->p_pid)
				pidchecked = p->p_pid;
			if (p->p_pgrp &&
			    p->p_pgrp->pg_id > nextpid &&
			    pidchecked > p->p_pgrp->pg_id) {
				pidchecked = p->p_pgrp->pg_id;
			}
			if (p->p_pgrp && p->p_session &&
			    p->p_session->s_sid > nextpid &&
			    pidchecked > p->p_session->s_sid) {
				pidchecked = p->p_session->s_sid;
			}
		}
		if (!doingzomb) {
			doingzomb = 1;
			p = LIST_FIRST(&zombproc);
			goto again;
		}
	}
	return(nextpid);
}

/*
 * Called from exit1 to remove a process from the allproc
 * list and move it to the zombie list.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
	lwkt_gettoken(&proc_token);
	PSTALL(p, "reap1", 0);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	p->p_stat = SZOMB;
	lwkt_reltoken(&proc_token);
	dsched_exit_proc(p);
}

/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_remove_zombie(struct proc *p)
{
	lwkt_gettoken(&proc_token);
	PSTALL(p, "reap2", 0);
	LIST_REMOVE(p, p_list);		/* off zombproc */
	LIST_REMOVE(p, p_sibling);
	p->p_pptr = NULL;
	lwkt_reltoken(&proc_token);
}

/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;

	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
		allocvnode_gc();
	}
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}
}

/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 *	nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
			allocvnode_gc();
		}
	}
}

/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;
	int limit = nprocs + ncpus;

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	lwkt_gettoken(&proc_token);
	LIST_FOREACH(p, &allproc, p_list) {
		PHOLD(p);
		r = callback(p, data);
		PRELE(p);
		if (r < 0)
			break;
		if (--limit < 0)
			break;
	}
	lwkt_reltoken(&proc_token);
}
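
/*
 * Illustrative callback sketch (added commentary, not original code; the
 * callback name and counter are hypothetical).  The callback runs with the
 * process held and proc_token held, and returning -1 terminates the scan:
 *
 *	static int
 *	count_stopped_callback(struct proc *p, void *data)
 *	{
 *		int *countp = data;
 *
 *		if (p->p_stat == SSTOP)
 *			++*countp;
 *		return (0);
 *	}
 *
 *	int nstopped = 0;
 *	allproc_scan(count_stopped_callback, &nstopped);
 */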

/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
{
	struct proc *p;
	struct lwp *lp;
	int r = 0;

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	lwkt_gettoken(&proc_token);
	LIST_FOREACH(p, &allproc, p_list) {
		PHOLD(p);
		FOREACH_LWP_IN_PROC(lp, p) {
			LWPHOLD(lp);
			r = callback(lp, data);
			LWPRELE(lp);
		}
		PRELE(p);
		if (r < 0)
			break;
	}
	lwkt_reltoken(&proc_token);
}

/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * No requirements.
 * The callback is made with the process held and proc_token held.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;

	lwkt_gettoken(&proc_token);
	LIST_FOREACH(p, &zombproc, p_list) {
		PHOLD(p);
		r = callback(p, data);
		PRELE(p);
		if (r < 0)
			break;
	}
	lwkt_reltoken(&proc_token);
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	struct pgrp *pgrp;
	struct proc *p;
	int i;

	for (i = 0; i <= pgrphash; i++) {
		if (!LIST_EMPTY(&pgrphashtbl[i])) {
			kprintf("\tindx %d\n", i);
			LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
				kprintf(
			"\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
				    (void *)pgrp, (long)pgrp->pg_id,
				    (void *)pgrp->pg_session,
				    pgrp->pg_session->s_count,
				    (void *)LIST_FIRST(&pgrp->pg_members));
				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
					kprintf("\t\tpid %ld addr %p pgrp %p\n",
					    (long)p->p_pid, (void *)p,
					    (void *)p->p_pgrp);
				}
			}
		}
	}
}
#endif /* DDB */

/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p;

	lwkt_gettoken(&proc_token);
	LIST_FOREACH(p, &zombproc, p_list) {
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&proc_token);
	return (NULL);
}

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	struct lwp *lp;
	int skp = 0, had_output = 0;
	int error;

	bzero(&ki, sizeof(ki));
	lwkt_gettoken(&p->p_token);
	fill_kinfo_proc(p, &ki);
	if ((flags & KERN_PROC_FLAG_LWP) == 0)
		skp = 1;
	error = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		fill_kinfo_lwp(lp, &ki.kp_lwp);
		had_output = 1;
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
		LWPRELE(lp);
		if (error)
			break;
		if (skp)
			break;
	}
	lwkt_reltoken(&p->p_token);
	/* We need to output at least the proc, even if there is no lwp. */
	if (had_output == 0) {
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
	}
	return (error);
}

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	int error;

	fill_kinfo_proc_kthread(td, &ki);
	error = SYSCTL_OUT(req, &ki, sizeof(ki));
	if (error)
		return error;
	return(0);
}

/*
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	int oid = oidp->oid_number;
	u_int namelen = arg2;
	struct proc *p;
	struct proclist *plist;
	struct thread *td;
	struct thread *marker;
	int doingzomb, flags = 0;
	int error = 0;
	int n;
	int origcpu;
	struct ucred *cr1 = curproc->p_ucred;

	flags = oid & KERN_PROC_FLAGMASK;
	oid &= ~KERN_PROC_FLAGMASK;

	if ((oid == KERN_PROC_ALL && namelen != 0) ||
	    (oid != KERN_PROC_ALL && namelen != 1)) {
		return (EINVAL);
	}

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	lwkt_gettoken(&proc_token);
	if (oid == KERN_PROC_PID) {
		p = pfindn((pid_t)name[0]);
		if (p == NULL)
			goto post_threads;
		if (!PRISON_CHECK(cr1, p->p_ucred))
			goto post_threads;
		PHOLD(p);
		error = sysctl_out_proc(p, req, flags);
		PRELE(p);
		goto post_threads;
	}

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			goto post_threads;
	}
	for (doingzomb = 0; doingzomb <= 1; doingzomb++) {
		if (doingzomb)
			plist = &zombproc;
		else
			plist = &allproc;
		LIST_FOREACH(p, plist, p_list) {
			/*
			 * Show a user only their processes.
			 */
			if ((!ps_showallprocs) && p_trespass(cr1, p->p_ucred))
				continue;
			/*
			 * Skip embryonic processes.
			 */
			if (p->p_stat == SIDL)
				continue;
			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid) {
			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0])
					continue;
				break;

			case KERN_PROC_TTY:
				if ((p->p_flags & P_CONTROLT) == 0 ||
				    p->p_session == NULL ||
				    p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
					(udev_t)name[0])
					continue;
				break;

			case KERN_PROC_UID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_uid != (uid_t)name[0])
					continue;
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_ruid != (uid_t)name[0])
					continue;
				break;
			}

			if (!PRISON_CHECK(cr1, p->p_ucred))
				continue;
			PHOLD(p);
			error = sysctl_out_proc(p, req, flags);
			PRELE(p);
			if (error)
				goto post_threads;
		}
	}

	/*
	 * Iterate over all active cpus and scan their thread list.  Start
	 * with the next logical cpu and end with our original cpu.  We
	 * migrate our own thread to each target cpu in order to safely scan
	 * its thread list.  In the last loop we migrate back to our original
	 * cpu.
	 */
	origcpu = mycpu->gd_cpuid;
	if (!ps_showallthreads || jailed(cr1))
		goto post_threads;

	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
	marker->td_flags = TDF_MARKER;
	error = 0;

	for (n = 1; n <= ncpus; ++n) {
		globaldata_t rgd;
		int nid;

		nid = (origcpu + n) % ncpus;
		if ((smp_active_mask & CPUMASK(nid)) == 0)
			continue;
		rgd = globaldata_find(nid);
		lwkt_setcpu_self(rgd);

		crit_enter();
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
			TAILQ_INSERT_BEFORE(td, marker, td_allq);
			if (td->td_flags & TDF_MARKER)
				continue;
			if (td->td_proc)
				continue;

			lwkt_hold(td);
			crit_exit();

			switch (oid) {
			case KERN_PROC_PGRP:
			case KERN_PROC_TTY:
			case KERN_PROC_UID:
			case KERN_PROC_RUID:
				break;
			default:
				error = sysctl_out_proc_kthread(td, req,
								doingzomb);
				break;
			}
			lwkt_rele(td);
			crit_enter();
			if (error)
				break;
		}
		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
		crit_exit();

		if (error)
			break;
	}
	kfree(marker, M_TEMP);

post_threads:
	lwkt_reltoken(&proc_token);
	return (error);
}
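
/*
 * Illustrative userland sketch (added commentary, not original code): the
 * handler above is normally reached through the kern.proc.* nodes
 * registered at the end of this file, e.g. fetching the kinfo_proc for a
 * single pid via sysctl(3):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/user.h>
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *
 *	if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0)
 *		... kp now describes the process ...
 */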

/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct pargs *opa;
	struct pargs *pa;
	int error = 0;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->newptr && curproc != p) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr && (pa = p->p_args) != NULL) {
		refcount_acquire(&pa->ar_ref);
		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
		if (refcount_release(&pa->ar_ref))
			kfree(pa, M_PARGS);
	}
	if (req->newptr == NULL)
		goto done;

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
		goto done;
	}

	pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = req->newlen;
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (error) {
		kfree(pa, M_PARGS);
		goto done;
	}

	/*
	 * Replace p_args with the new pa.  p_args may have previously
	 * been NULL.
	 */
	opa = p->p_args;
	p->p_args = pa;

	if (opa) {
		KKASSERT(opa->ar_ref > 0);
		if (refcount_release(&opa->ar_ref)) {
			kfree(opa, M_PARGS);
			/* opa = NULL; */
		}
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int error = 0;
	char *fullpath, *freepath;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	/*
	 * If we are not allowed to see other args, we certainly shouldn't
	 * get the cwd either.  Also check the usual trespassing.
	 */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
		struct nchandle nch;

		cache_copy(&p->p_fd->fd_ncdir, &nch);
		error = cache_fullpath(p, &nch, NULL,
				       &fullpath, &freepath, 0);
		cache_drop(&nch);
		if (error)
			goto done;
		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
		kfree(freepath, M_TEMP);
	}

done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
	sysctl_kern_proc_args, "Process argument list");

SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD | CTLFLAG_ANYBODY,
	sysctl_kern_proc_cwd, "Process current working directory");
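
/*
 * Illustrative userland sketch (added commentary, not original code):
 * KERN_PROC_ARGS is also writable by a process for its own pid, which is
 * how setproctitle()-style updates reach the kernel.  A minimal sketch,
 * assuming "title" holds the new argument string:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, getpid() };
 *
 *	sysctl(mib, 4, NULL, NULL, title, strlen(title) + 1);
 */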