/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <sys/random.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

/*
 * Hash table size must be a power of two and is not currently dynamically
 * sized.  There is a trade-off between the linear scans which must iterate
 * all HSIZE elements and the number of elements which might accumulate
 * within each hash chain.
 */
#define ALLPROC_HSIZE	256
#define ALLPROC_HMASK	(ALLPROC_HSIZE - 1)
#define ALLPROC_HASH(pid)	(pid & ALLPROC_HMASK)
#define PGRP_HASH(pid)	(pid & ALLPROC_HMASK)
#define SESS_HASH(pid)	(pid & ALLPROC_HMASK)
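
/*
 * Illustrative sketch (not part of the original code): pids, pgids, and
 * sids that are congruent mod ALLPROC_HSIZE share a hash chain, and each
 * chain is serialized by the matching proc_tokens[] entry.  The canonical
 * lookup pattern used throughout this file is:
 *
 *	int n = ALLPROC_HASH(pid);
 *
 *	lwkt_gettoken_shared(&proc_tokens[n]);
 *	LIST_FOREACH(p, &allprocs[n], p_list) {
 *		if (p->p_pid == pid)
 *			break;
 *	}
 *	lwkt_reltoken(&proc_tokens[n]);
 */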

/* Used by libkvm */
int allproc_hsize = ALLPROC_HSIZE;

LIST_HEAD(pidhashhead, proc);

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
    &ps_showallprocs, 0,
    "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
    &ps_showallthreads, 0,
    "Unprivileged processes can see kernel threads");

static void orphanpg(struct pgrp *pg);
static void proc_makepid(struct proc *p, int random_offset);

/*
 * Other process lists
 */
static struct lwkt_token proc_tokens[ALLPROC_HSIZE];
static struct proclist allprocs[ALLPROC_HSIZE];	/* locked by proc_tokens */
static struct pgrplist allpgrps[ALLPROC_HSIZE];	/* locked by proc_tokens */
static struct sesslist allsessn[ALLPROC_HSIZE];	/* locked by proc_tokens */

/*
 * Random component to nextpid generation.  We mix in a random factor to
 * make it a little harder to predict.  We sanity check the modulus value
 * to avoid doing it in critical paths.  Don't let it be too small or we
 * pointlessly waste entropy, and don't let it be impossibly large.  Using
 * a modulus that is too big causes a LOT more process table scans and
 * slows down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

/*
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
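
/*
 * Usage note (illustrative, not from the original source): the modulus
 * can be set from userland, e.g.
 *
 *	sysctl kern.randompid=1000
 *
 * after which proc_makepid() below advances nextpid by a random offset
 * in [0, 1000) for each fork instead of allocating pids sequentially.
 */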

/*
 * Initialize global process hashing structures.
 *
 * These functions are ONLY called from the low level boot code and do
 * not lock their operations.
 */
void
procinit(void)
{
	u_long i;

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		LIST_INIT(&allprocs[i]);
		LIST_INIT(&allsessn[i]);
		LIST_INIT(&allpgrps[i]);
		lwkt_token_init(&proc_tokens[i], "allproc");
	}
	lwkt_init();
	uihashinit();
}

void
procinsertinit(struct proc *p)
{
	LIST_INSERT_HEAD(&allprocs[ALLPROC_HASH(p->p_pid)], p, p_list);
}

void
pgrpinsertinit(struct pgrp *pg)
{
	LIST_INSERT_HEAD(&allpgrps[ALLPROC_HASH(pg->pg_id)], pg, pg_list);
}

void
sessinsertinit(struct session *sess)
{
	LIST_INSERT_HEAD(&allsessn[ALLPROC_HASH(sess->s_sid)], sess, s_list);
}

/*
 * Process hold/release support functions.  Called via the PHOLD(),
 * PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB	0x20000000
#define PLOCK_WAITING	0x40000000
#define PLOCK_MASK	0x1FFFFFFF

void
pstall(struct proc *p, const char *wmesg, int count)
{
	int o;
	int n;

	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_MASK) <= count)
			break;
		n = o | PLOCK_WAITING;
		tsleep_interlock(&p->p_lock, 0);

		/*
		 * If someone is trying to single-step the process during
		 * an exec or an exit they can deadlock us because procfs
		 * sleeps with the process held.
		 */
		if (p->p_stops) {
			if (p->p_flags & P_INEXEC) {
				wakeup(&p->p_stype);
			} else if (p->p_flags & P_POSTEXIT) {
				spin_lock(&p->p_spin);
				p->p_stops = 0;
				p->p_step = 0;
				spin_unlock(&p->p_spin);
				wakeup(&p->p_stype);
			}
		}

		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
		}
	}
}

void
phold(struct proc *p)
{
	atomic_add_int(&p->p_lock, 1);
}

/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prele(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 1, 0))
		return;

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~PLOCK_WAITING;
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}
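
/*
 * Example (illustrative sketch, not part of the original code): the
 * typical short-term hold pattern used by process scans.  The hold only
 * prevents the proc structure from being ripped out; p->p_token must
 * still be acquired if stable access to its fields is needed:
 *
 *	PHOLD(p);
 *	lwkt_gettoken_shared(&p->p_token);
 *	... examine p ...
 *	lwkt_reltoken(&p->p_token);
 *	PRELE(p);
 */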

/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that we must block
 * without holding a ref, meaning that the caller must ensure that (p)
 * remains valid through some other interlock (typically on its parent
 * process's p_token).
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
		return (0);

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_ZOMB) == 0) {
			n = (o + 1) | PLOCK_ZOMB;
			if (atomic_cmpset_int(&p->p_lock, o, n))
				return (0);
		} else {
			KKASSERT((o & PLOCK_MASK) > 0);
			n = o | PLOCK_WAITING;
			tsleep_interlock(&p->p_lock, 0);
			if (atomic_cmpset_int(&p->p_lock, o, n)) {
				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
				/* (p) can be ripped out at this point */
				return (1);
			}
		}
	}
}

/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prelezomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
		return;

	/*
	 * Slow path
	 */
	KKASSERT(p->p_lock & PLOCK_ZOMB);
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 */
int
inferior(struct proc *p)
{
	struct proc *p2;

	PHOLD(p);
	lwkt_gettoken_shared(&p->p_token);
	while (p != curproc) {
		if (p->p_pid == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (0);
		}
		p2 = p->p_pptr;
		PHOLD(p2);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		lwkt_gettoken_shared(&p2->p_token);
		p = p2;
	}
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (1);
}

/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
	struct proc *p = curproc;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);

	lwkt_gettoken_shared(&proc_tokens[n]);
	LIST_FOREACH(p, &allprocs[n], p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&proc_tokens[n]);
			return (p);
		}
	}
	lwkt_reltoken(&proc_tokens[n]);

	return (NULL);
}

/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The result will not be stable and is typically only used to validate
 * against a process that the caller has in-hand.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
	struct proc *p = curproc;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid)
		return (p);

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);

	lwkt_gettoken_shared(&proc_tokens[n]);
	LIST_FOREACH(p, &allprocs[n], p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			lwkt_reltoken(&proc_tokens[n]);
			return (p);
		}
	}
	lwkt_reltoken(&proc_tokens[n]);

	return (NULL);
}
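
/*
 * Illustrative comparison (not part of the original code): pfind()
 * returns a held process which must be PRELE()d, while pfindn() returns
 * an unstable pointer only suitable for validating a process already
 * in-hand (p_inhand is a hypothetical pointer the caller already holds):
 *
 *	struct proc *p;
 *
 *	if ((p = pfind(pid)) != NULL) {
 *		... p cannot be ripped out here ...
 *		PRELE(p);
 *	}
 *
 *	if (pfindn(pid) == p_inhand)
 *		... p_inhand still matched pid a moment ago ...
 */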

/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p = curproc;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);

	lwkt_gettoken_shared(&proc_tokens[n]);
	LIST_FOREACH(p, &allprocs[n], p_list) {
		if (p->p_stat != SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&proc_tokens[n]);
			return (p);
		}
	}
	lwkt_reltoken(&proc_tokens[n]);

	return (NULL);
}

void
pgref(struct pgrp *pgrp)
{
	refcount_acquire(&pgrp->pg_refs);
}

void
pgrel(struct pgrp *pgrp)
{
	int count;
	int n;

	n = PGRP_HASH(pgrp->pg_id);
	for (;;) {
		count = pgrp->pg_refs;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&proc_tokens[n]);
			if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
				break;
			lwkt_reltoken(&proc_tokens[n]);
			/* retry */
		} else {
			if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition, proc_tokens[n] is held.
	 */
	LIST_REMOVE(pgrp, pg_list);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	}
	lwkt_reltoken(&proc_tokens[n]);

	sess_rele(pgrp->pg_session);
	kfree(pgrp, M_PGRP);
}

/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;
	int n;

	n = PGRP_HASH(pgid);
	lwkt_gettoken_shared(&proc_tokens[n]);

	LIST_FOREACH(pgrp, &allpgrps[n], pg_list) {
		if (pgrp->pg_id == pgid) {
			refcount_acquire(&pgrp->pg_refs);
			lwkt_reltoken(&proc_tokens[n]);
			return (pgrp);
		}
	}
	lwkt_reltoken(&proc_tokens[n]);
	return (NULL);
}
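
/*
 * Example (illustrative sketch, not part of the original code): pgfind()
 * returns a referenced pgrp; drop the ref with pgrel() when done, which
 * also tears the pgrp down on the final 1->0 transition:
 *
 *	struct pgrp *pg;
 *
 *	if ((pg = pgfind(pgid)) != NULL) {
 *		... use pg ...
 *		pgrel(pg);
 *	}
 */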

/*
 * Move p to a new or existing process group (and session).
 *
 * No requirements.
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *opgrp;
	int error;

	pgrp = pgfind(pgid);

	KASSERT(pgrp == NULL || !mksess,
		("enterpgrp: setsid into non-empty pgrp"));
	KASSERT(!SESS_LEADER(p),
		("enterpgrp: session leader attempted setpgrp"));

	if (pgrp == NULL) {
		pid_t savepid = p->p_pid;
		struct proc *np;
		int n;

		/*
		 * new process group
		 */
		KASSERT(p->p_pid == pgid,
			("enterpgrp: new pgrp and pid != pgid"));
		pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_jobc = 0;
		SLIST_INIT(&pgrp->pg_sigiolst);
		lwkt_token_init(&pgrp->pg_token, "pgrp_token");
		refcount_init(&pgrp->pg_refs, 1);
		lockinit(&pgrp->pg_lock, "pgwt", 0, 0);

		n = PGRP_HASH(pgid);
		lwkt_gettoken(&proc_tokens[n]);

		if ((np = pfindn(savepid)) == NULL || np != p) {
			lwkt_reltoken(&proc_tokens[n]);
			error = ESRCH;
			kfree(pgrp, M_PGRP);
			goto fatal;
		}

		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = kmalloc(sizeof(struct session), M_SESSION,
				       M_WAITOK | M_ZERO);
			lwkt_gettoken(&p->p_token);
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			bcopy(p->p_session->s_login, sess->s_login,
			      sizeof(sess->s_login));
			pgrp->pg_session = sess;
			KASSERT(p == curproc,
				("enterpgrp: mksession and p != curproc"));
			p->p_flags &= ~P_CONTROLT;
			LIST_INSERT_HEAD(&allsessn[n], sess, s_list);
			lwkt_reltoken(&p->p_token);
		} else {
			lwkt_gettoken(&p->p_token);
			pgrp->pg_session = p->p_session;
			sess_hold(pgrp->pg_session);
			lwkt_reltoken(&p->p_token);
		}
		LIST_INSERT_HEAD(&allpgrps[n], pgrp, pg_list);

		lwkt_reltoken(&proc_tokens[n]);
	} else if (pgrp == p->p_pgrp) {
		pgrel(pgrp);
		goto done;
	} /* else pgfind() referenced the pgrp */

	lwkt_gettoken(&pgrp->pg_token);
	lwkt_gettoken(&p->p_token);

	/*
	 * Replace p->p_pgrp, handling any races that occur.
	 */
	while ((opgrp = p->p_pgrp) != NULL) {
		pgref(opgrp);
		lwkt_gettoken(&opgrp->pg_token);
		if (opgrp != p->p_pgrp) {
			lwkt_reltoken(&opgrp->pg_token);
			pgrel(opgrp);
			continue;
		}
		LIST_REMOVE(p, p_pglist);
		break;
	}
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	if (opgrp) {
		fixjobc(p, opgrp, 0);
		lwkt_reltoken(&opgrp->pg_token);
		pgrel(opgrp);	/* manual pgref */
		pgrel(opgrp);	/* p->p_pgrp ref */
	}
	lwkt_reltoken(&p->p_token);
	lwkt_reltoken(&pgrp->pg_token);
done:
	error = 0;
fatal:
	return (error);
}
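
/*
 * Usage note (illustrative, not from the original source): the two common
 * callers map onto the arguments as follows.  A setsid(2)-style caller
 * creates both a new pgrp and a new session, while a setpgid(2)-style
 * caller only moves the process:
 *
 *	error = enterpgrp(p, p->p_pid, 1);	setsid: new session + pgrp
 *	error = enterpgrp(p, pgid, 0);		setpgid: join/create pgrp
 */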

/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
	struct pgrp *pg;

	lwkt_gettoken(&p->p_token);
	while ((pg = p->p_pgrp) != NULL) {
		pgref(pg);
		lwkt_gettoken(&pg->pg_token);
		if (p->p_pgrp != pg) {
			lwkt_reltoken(&pg->pg_token);
			pgrel(pg);
			continue;
		}
		p->p_pgrp = NULL;
		LIST_REMOVE(p, p_pglist);
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);	/* manual pgref */
		pgrel(pg);	/* p->p_pgrp ref */
		break;
	}
	lwkt_reltoken(&p->p_token);

	return (0);
}

/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
	atomic_add_int(&sp->s_count, 1);
}

/*
 * No requirements.
 */
void
sess_rele(struct session *sess)
{
	struct tty *tp;
	int count;
	int n;

	n = SESS_HASH(sess->s_sid);
	for (;;) {
		count = sess->s_count;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&tty_token);
			lwkt_gettoken(&proc_tokens[n]);
			if (atomic_cmpset_int(&sess->s_count, 1, 0))
				break;
			lwkt_reltoken(&proc_tokens[n]);
			lwkt_reltoken(&tty_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&sess->s_count, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition, tty_token and proc_tokens[n] are held.
	 */
	LIST_REMOVE(sess, s_list);

	if (sess->s_ttyp && sess->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
		/* FULL CLOSE, see ttyclearsession() */
		KKASSERT(sess->s_ttyp->t_session == sess);
		sess->s_ttyp->t_session = NULL;
#else
		/* HALF CLOSE, see ttyclearsession() */
		if (sess->s_ttyp->t_session == sess)
			sess->s_ttyp->t_session = NULL;
#endif
	}
	if ((tp = sess->s_ttyp) != NULL) {
		sess->s_ttyp = NULL;
		ttyunhold(tp);
	}
	lwkt_reltoken(&proc_tokens[n]);
	lwkt_reltoken(&tty_token);

	kfree(sess, M_SESSION);
}
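
/*
 * Illustrative pairing (not part of the original code): sess_hold() and
 * sess_rele() bracket any use of a session pointer that is not otherwise
 * interlocked, e.g. when copying it out of a pgrp:
 *
 *	sess_hold(pg->pg_session);
 *	... use the session ...
 *	sess_rele(pg->pg_session);
 */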

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 *
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *np;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	lwkt_gettoken(&p->p_token);	/* p_children scan */
	lwkt_gettoken(&pgrp->pg_token);

	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(np, &p->p_children, p_sibling) {
		PHOLD(np);
		lwkt_gettoken(&np->p_token);
		if ((hispgrp = np->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    np->p_stat != SZOMB) {
			pgref(hispgrp);
			lwkt_gettoken(&hispgrp->pg_token);
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
			lwkt_reltoken(&hispgrp->pg_token);
			pgrel(hispgrp);
		}
		lwkt_reltoken(&np->p_token);
		PRELE(np);
	}
	KKASSERT(pgrp->pg_refs > 0);
	lwkt_reltoken(&pgrp->pg_token);
	lwkt_reltoken(&p->p_token);
}

/*
 * A process group has become orphaned; if there are any stopped processes
 * in the group, hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				ksignal(p, SIGHUP);
				ksignal(p, SIGCONT);
			}
			return;
		}
	}
}

/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
	int random_offset;

	if ((random_offset = randompid) != 0) {
		read_random(&random_offset, sizeof(random_offset));
		random_offset = (random_offset & 0x7FFFFFFF) % randompid;
	}
	proc_makepid(p, random_offset);
}

/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * p_pid is assigned and the process is added to the allproc hash table.
 */
static
void
proc_makepid(struct proc *p, int random_offset)
{
	static pid_t nextpid;	/* heuristic, allowed to race */
	struct pgrp *pg;
	struct proc *ps;
	struct session *sess;
	pid_t base;
	int n;

	/*
	 * Calculate a hash index and find an unused process id within
	 * the table, looping if we cannot find one.
	 */
	if (random_offset)
		atomic_add_int(&nextpid, random_offset);
retry:
	base = atomic_fetchadd_int(&nextpid, 1) + 1;
	if (base >= PID_MAX) {
		base = base % PID_MAX;
		if (base < 100)
			base += 100;
	}
	n = ALLPROC_HASH(base);
	lwkt_gettoken(&proc_tokens[n]);

	LIST_FOREACH(ps, &allprocs[n], p_list) {
		if (ps->p_pid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&proc_tokens[n]);
				goto retry;
			}
		}
	}
	LIST_FOREACH(pg, &allpgrps[n], pg_list) {
		if (pg->pg_id == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&proc_tokens[n]);
				goto retry;
			}
		}
	}
	LIST_FOREACH(sess, &allsessn[n], s_list) {
		if (sess->s_sid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&proc_tokens[n]);
				goto retry;
			}
		}
	}

	/*
	 * Assign the pid and insert the process.
	 */
	p->p_pid = base;
	LIST_INSERT_HEAD(&allprocs[n], p, p_list);
	lwkt_reltoken(&proc_tokens[n]);
}
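
/*
 * Worked example (illustrative, not from the original source): because a
 * colliding candidate pid is advanced by ALLPROC_HSIZE, it stays in the
 * same hash chain and the scan never has to drop proc_tokens[n].  E.g.
 * with base 356 (chain 356 & 255 = 100) already taken, the next
 * candidates are 612, 868, ... all of which also hash to chain 100.
 */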

/*
 * Called from exit1 to place the process into a zombie state.
 * The process is removed from the pid hash and p_stat is set
 * to SZOMB.  Normal pfind[n]() calls will not find it any more.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
	int n;

	n = ALLPROC_HASH(p->p_pid);
	PSTALL(p, "reap1", 0);
	lwkt_gettoken(&proc_tokens[n]);

	PSTALL(p, "reap1a", 0);
	p->p_stat = SZOMB;

	lwkt_reltoken(&proc_tokens[n]);
	dsched_exit_proc(p);
}

/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_remove_zombie(struct proc *p)
{
	int n;

	n = ALLPROC_HASH(p->p_pid);

	PSTALL(p, "reap2", 0);
	lwkt_gettoken(&proc_tokens[n]);
	PSTALL(p, "reap2a", 0);
	LIST_REMOVE(p, p_list);		/* remove from the master list */
	LIST_REMOVE(p, p_sibling);	/* and from the sibling list */
	p->p_pptr = NULL;
	lwkt_reltoken(&proc_tokens[n]);
}

/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;

	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
		allocvnode_gc();
	}
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0, NULL);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}
}

/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 * nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
			allocvnode_gc();
		}
	}
}
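
/*
 * Ordering note (illustrative, not from the original source): roughly,
 * the exit/reap path runs the two stall points above in sequence:
 *
 *	exit1()     -> proc_move_allproc_zombie(p)	p_stat = SZOMB
 *	kern_wait() -> pholdzomb(p)			serialize reapers
 *	            -> proc_remove_zombie(p)		off hash+sibling lists
 *	            -> prelezomb(p), kfree
 *
 * PSTALL() drains transient PHOLD() refs before each list manipulation so
 * concurrent allproc scans see a consistent list.
 */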

/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 * Zombie procs are skipped.
 *
 * The callback is made with the process held and proc_tokens[n] held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	int limit = nprocs + ncpus;
	struct proc *p;
	int r;
	int n;

	/*
	 * proc_tokens[n] protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_FIRST(&allprocs[n]) == NULL)
			continue;
		lwkt_gettoken(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
			if (--limit < 0)
				break;
		}
		lwkt_reltoken(&proc_tokens[n]);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}

/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and
 * proc_tokens[n] held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
{
	struct proc *p;
	struct lwp *lp;
	int r = 0;
	int n;

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_FIRST(&allprocs[n]) == NULL)
			continue;
		lwkt_gettoken(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			FOREACH_LWP_IN_PROC(lp, p) {
				LWPHOLD(lp);
				r = callback(lp, data);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&proc_tokens[n]);

		/*
		 * Asked to exit early
		 */
		if (p)
			break;
	}
}

/*
 * Scan all processes on the zombie list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_tokens[n] held.
 *
 * No requirements.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;
	int n;

	/*
	 * proc_tokens[n] protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_FIRST(&allprocs[n]) == NULL)
			continue;
		lwkt_gettoken(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			if (p->p_stat != SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&proc_tokens[n]);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	struct pgrp *pgrp;
	struct proc *p;
	int i;

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		if (LIST_EMPTY(&allpgrps[i]))
			continue;
		kprintf("\tindx %d\n", i);
		LIST_FOREACH(pgrp, &allpgrps[i], pg_list) {
			kprintf("\tpgrp %p, pgid %ld, sess %p, "
				"sesscnt %d, mem %p\n",
				(void *)pgrp, (long)pgrp->pg_id,
				(void *)pgrp->pg_session,
				pgrp->pg_session->s_count,
				(void *)LIST_FIRST(&pgrp->pg_members));
			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
				kprintf("\t\tpid %ld addr %p pgrp %p\n",
					(long)p->p_pid, (void *)p,
					(void *)p->p_pgrp);
			}
		}
	}
}
#endif /* DDB */
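
/*
 * Example callback (illustrative sketch, not part of the original code):
 * a hypothetical counter showing the allproc_scan() contract.  Returning
 * -1 from the callback would terminate the scan early:
 *
 *	static int
 *	count_stopped_cb(struct proc *p, void *data)
 *	{
 *		int *countp = data;
 *
 *		if (p->p_stat == SSTOP)
 *			++*countp;
 *		return (0);
 *	}
 *
 *	int count = 0;
 *
 *	allproc_scan(count_stopped_cb, &count);
 */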

/*
 * The caller must hold the process referenced (via PHOLD() or the
 * per-chain proc_tokens[]).
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	struct lwp *lp;
	int skp = 0, had_output = 0;
	int error;

	bzero(&ki, sizeof(ki));
	lwkt_gettoken_shared(&p->p_token);
	fill_kinfo_proc(p, &ki);
	if ((flags & KERN_PROC_FLAG_LWP) == 0)
		skp = 1;
	error = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		fill_kinfo_lwp(lp, &ki.kp_lwp);
		had_output = 1;
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
		LWPRELE(lp);
		if (error)
			break;
		if (skp)
			break;
	}
	lwkt_reltoken(&p->p_token);

	/* We need to output at least the proc, even if there is no lwp. */
	if (had_output == 0)
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
	return (error);
}

/*
 * The caller must hold the thread (via lwkt_hold()).
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
{
	struct kinfo_proc ki;
	int error;

	fill_kinfo_proc_kthread(td, &ki);
	error = SYSCTL_OUT(req, &ki, sizeof(ki));
	return (error);
}
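
/*
 * Illustrative note (not from the original source): with the
 * KERN_PROC_FLAG_LWP sysctl variants defined at the end of this file,
 * sysctl_out_proc() emits one kinfo_proc record per lwp rather than
 * stopping after the first, so a 3-lwp process produces three records
 * that differ only in kp_lwp.
 */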

/*
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int oid = oidp->oid_number;
	u_int namelen = arg2;
	struct proc *p;
	struct thread *td;
	struct thread *marker;
	int flags = 0;
	int error = 0;
	int n;
	int origcpu;
	struct ucred *cr1 = curproc->p_ucred;

	flags = oid & KERN_PROC_FLAGMASK;
	oid &= ~KERN_PROC_FLAGMASK;

	if ((oid == KERN_PROC_ALL && namelen != 0) ||
	    (oid != KERN_PROC_ALL && namelen != 1)) {
		return (EINVAL);
	}

	/*
	 * proc_tokens[n] protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	if (oid == KERN_PROC_PID) {
		p = pfind((pid_t)name[0]);
		if (p) {
			if (PRISON_CHECK(cr1, p->p_ucred))
				error = sysctl_out_proc(p, req, flags);
			PRELE(p);
		}
		goto post_threads;
	}
	p = NULL;

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			goto post_threads;
	}

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_EMPTY(&allprocs[n]))
			continue;
		lwkt_gettoken_shared(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			/*
			 * Show a user only their processes.
			 */
			if ((!ps_showallprocs) &&
			    (p->p_ucred == NULL ||
			     p_trespass(cr1, p->p_ucred))) {
				continue;
			}

			/*
			 * Skip embryonic processes.
			 */
			if (p->p_stat == SIDL)
				continue;

			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid) {
			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0])
					continue;
				break;

			case KERN_PROC_TTY:
				if ((p->p_flags & P_CONTROLT) == 0 ||
				    p->p_session == NULL ||
				    p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
				    (udev_t)name[0])
					continue;
				break;

			case KERN_PROC_UID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_uid != (uid_t)name[0])
					continue;
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_ruid != (uid_t)name[0])
					continue;
				break;
			}

			if (!PRISON_CHECK(cr1, p->p_ucred))
				continue;
			PHOLD(p);
			error = sysctl_out_proc(p, req, flags);
			PRELE(p);
			if (error) {
				lwkt_reltoken(&proc_tokens[n]);
				goto post_threads;
			}
		}
		lwkt_reltoken(&proc_tokens[n]);
	}

	/*
	 * Iterate over all active cpus and scan their thread list.  Start
	 * with the next logical cpu and end with our original cpu.  We
	 * migrate our own thread to each target cpu in order to safely scan
	 * its thread list.  In the last loop we migrate back to our original
	 * cpu.
	 */
	origcpu = mycpu->gd_cpuid;
	if (!ps_showallthreads || jailed(cr1))
		goto post_threads;

	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
	marker->td_flags = TDF_MARKER;
	error = 0;

	for (n = 1; n <= ncpus; ++n) {
		globaldata_t rgd;
		int nid;

		nid = (origcpu + n) % ncpus;
		if ((smp_active_mask & CPUMASK(nid)) == 0)
			continue;
		rgd = globaldata_find(nid);
		lwkt_setcpu_self(rgd);

		crit_enter();
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
			TAILQ_INSERT_BEFORE(td, marker, td_allq);
			if (td->td_flags & TDF_MARKER)
				continue;
			if (td->td_proc)
				continue;

			lwkt_hold(td);
			crit_exit();

			switch (oid) {
			case KERN_PROC_PGRP:
			case KERN_PROC_TTY:
			case KERN_PROC_UID:
			case KERN_PROC_RUID:
				break;
			default:
				error = sysctl_out_proc_kthread(td, req);
				break;
			}
			lwkt_rele(td);
			crit_enter();
			if (error)
				break;
		}
		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
		crit_exit();

		if (error)
			break;
	}

	/*
	 * Userland scheduler expects us to return on the same cpu we
	 * started on.
	 */
	if (mycpu->gd_cpuid != origcpu)
		lwkt_setcpu_self(globaldata_find(origcpu));

	kfree(marker, M_TEMP);

post_threads:
	return (error);
}
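
/*
 * Userland usage sketch (illustrative, not part of the original code):
 * fetching a single process entry via the KERN_PROC_PID node handled
 * above:
 *
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
 *
 *	if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0)
 *		printf("%s\n", kp.kp_comm);
 */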

/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct pargs *opa;
	struct pargs *pa;
	int error = 0;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->newptr && curproc != p) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr && (pa = p->p_args) != NULL) {
		refcount_acquire(&pa->ar_ref);
		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
		if (refcount_release(&pa->ar_ref))
			kfree(pa, M_PARGS);
	}
	if (req->newptr == NULL)
		goto done;

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
		goto done;

	pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = req->newlen;
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (error) {
		kfree(pa, M_PARGS);
		goto done;
	}

	/*
	 * Replace p_args with the new pa.  p_args may have previously
	 * been NULL.
	 */
	opa = p->p_args;
	p->p_args = pa;

	if (opa) {
		KKASSERT(opa->ar_ref > 0);
		if (refcount_release(&opa->ar_ref)) {
			kfree(opa, M_PARGS);
			/* opa = NULL; */
		}
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct proc *p;
	int error = 0;
	char *fullpath, *freepath;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken_shared(&p->p_token);

	/*
	 * If we are not allowed to see other args, we certainly shouldn't
	 * get the cwd either.  Also check the usual trespassing.
	 */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
		struct nchandle nch;

		cache_copy(&p->p_fd->fd_ncdir, &nch);
		error = cache_fullpath(p, &nch, NULL,
				       &fullpath, &freepath, 0);
		cache_drop(&nch);
		if (error)
			goto done;
		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
		kfree(freepath, M_TEMP);
	}

done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}
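
/*
 * Usage note (illustrative, not from the original source): writing to the
 * args node is how a process replaces its own title, e.g. what a
 * setproctitle(3)-style helper would do (newtitle is hypothetical):
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, getpid() };
 *
 *	sysctl(mib, 4, NULL, NULL, newtitle, strlen(newtitle) + 1);
 *
 * The handler above only permits the write when curproc == p.
 */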

/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	struct vnode *vp;
	char *retbuf, *freebuf;
	int error = 0;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = curproc;
	} else {
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
	}

	vp = p->p_textvp;
	if (vp == NULL)
		goto done;

	vref(vp);
	error = vn_fullpath(p, vp, &retbuf, &freebuf, 0);
	vrele(vp);
	if (error)
		goto done;
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	kfree(freebuf, M_TEMP);
done:
	if (*pidp != -1)
		PRELE(p);

	return (error);
}

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
	sysctl_kern_proc_args, "Process argument list");

SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD | CTLFLAG_ANYBODY,
	sysctl_kern_proc_cwd, "Process current working directory");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD,
	sysctl_kern_proc_pathname, "Process executable path");