1 /* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
28 */ 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/kernel.h> 33 #include <sys/sysctl.h> 34 #include <sys/malloc.h> 35 #include <sys/proc.h> 36 #include <sys/vnode.h> 37 #include <sys/jail.h> 38 #include <sys/filedesc.h> 39 #include <sys/tty.h> 40 #include <sys/dsched.h> 41 #include <sys/signalvar.h> 42 #include <sys/spinlock.h> 43 #include <sys/random.h> 44 #include <sys/vnode.h> 45 #include <vm/vm.h> 46 #include <sys/lock.h> 47 #include <vm/pmap.h> 48 #include <vm/vm_map.h> 49 #include <sys/user.h> 50 #include <machine/smp.h> 51 52 #include <sys/refcount.h> 53 #include <sys/spinlock2.h> 54 #include <sys/mplock2.h> 55 56 /* 57 * Hash table size must be a power of two and is not currently dynamically 58 * sized. There is a trade-off between the linear scans which must iterate 59 * all HSIZE elements and the number of elements which might accumulate 60 * within each hash chain. 61 */ 62 #define ALLPROC_HSIZE 256 63 #define ALLPROC_HMASK (ALLPROC_HSIZE - 1) 64 #define ALLPROC_HASH(pid) (pid & ALLPROC_HMASK) 65 #define PGRP_HASH(pid) (pid & ALLPROC_HMASK) 66 #define SESS_HASH(pid) (pid & ALLPROC_HMASK) 67 68 LIST_HEAD(pidhashhead, proc); 69 70 static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header"); 71 MALLOC_DEFINE(M_SESSION, "session", "session header"); 72 MALLOC_DEFINE(M_PROC, "proc", "Proc structures"); 73 MALLOC_DEFINE(M_LWP, "lwp", "lwp structures"); 74 MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures"); 75 76 int ps_showallprocs = 1; 77 static int ps_showallthreads = 1; 78 SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW, 79 &ps_showallprocs, 0, 80 "Unprivileged processes can see processes with different UID/GID"); 81 SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW, 82 &ps_showallthreads, 0, 83 "Unprivileged processes can see kernel threads"); 84 85 static void orphanpg(struct pgrp *pg); 86 static void proc_makepid(struct proc *p, int random_offset); 87 88 /* 89 * Other process lists 90 */ 91 
/*
 * Per-bucket tokens serialize access to the three hash tables below.
 * All three tables are indexed with the same hash (ALLPROC_HASH et al),
 * so one token covers the proc, pgrp, and session chains for a bucket.
 */
static struct lwkt_token proc_tokens[ALLPROC_HSIZE];
static struct proclist allprocs[ALLPROC_HSIZE];	/* locked by proc_tokens */
static struct pgrplist allpgrps[ALLPROC_HSIZE];	/* locked by proc_tokens */
static struct sesslist allsessn[ALLPROC_HSIZE];	/* locked by proc_tokens */

/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

/*
 * Sysctl handler for kern.randompid.  Clamps the requested modulus into
 * a sane range: 0 disables randomization, values below 100 are bumped up,
 * and values near PID_MAX are pulled back to leave headroom.
 *
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
	    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

/*
 * Initialize global process hashing structures.
 *
 * These functions are ONLY called from the low level boot code and do
 * not lock their operations.
 */
void
procinit(void)
{
	u_long i;

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		LIST_INIT(&allprocs[i]);
		LIST_INIT(&allsessn[i]);
		LIST_INIT(&allpgrps[i]);
		lwkt_token_init(&proc_tokens[i], "allproc");
	}
	lwkt_init();
	uihashinit();
}

/*
 * Boot-time insertion of proc0/init structures into the hash tables.
 * No locking — called before the system goes MP.
 */
void
procinsertinit(struct proc *p)
{
	LIST_INSERT_HEAD(&allprocs[ALLPROC_HASH(p->p_pid)], p, p_list);
}

void
pgrpinsertinit(struct pgrp *pg)
{
	LIST_INSERT_HEAD(&allpgrps[ALLPROC_HASH(pg->pg_id)], pg, pg_list);
}

void
sessinsertinit(struct session *sess)
{
	LIST_INSERT_HEAD(&allsessn[ALLPROC_HASH(sess->s_sid)], sess, s_list);
}

/*
 * Process hold/release support functions.  Called via the PHOLD(),
 * PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB	0x20000000	/* serializes pholdzomb() callers */
#define PLOCK_WAITING	0x40000000	/* a waiter is blocked on p_lock */
#define PLOCK_MASK	0x1FFFFFFF	/* actual hold count */

/*
 * Block until the hold count portion of p->p_lock drops to <= count.
 * Sets PLOCK_WAITING so releasers know to issue a wakeup.
 */
void
pstall(struct proc *p, const char *wmesg, int count)
{
	int o;
	int n;

	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_MASK) <= count)
			break;
		n = o | PLOCK_WAITING;
		tsleep_interlock(&p->p_lock, 0);

		/*
		 * If someone is trying to single-step the process during
		 * an exec or an exit they can deadlock us because procfs
		 * sleeps with the process held.
		 */
		if (p->p_stops) {
			if (p->p_flags & P_INEXEC) {
				wakeup(&p->p_stype);
			} else if (p->p_flags & P_POSTEXIT) {
				spin_lock(&p->p_spin);
				p->p_stops = 0;
				p->p_step = 0;
				spin_unlock(&p->p_spin);
				wakeup(&p->p_stype);
			}
		}

		/* only sleep if the WAITING flag made it in atomically */
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
		}
	}
}

/*
 * Add a hold on a process (PHOLD()).  A bare increment suffices; flags
 * in the upper bits are not disturbed.
 */
void
phold(struct proc *p)
{
	atomic_add_int(&p->p_lock, 1);
}

/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prele(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 1, 0))
		return;

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~PLOCK_WAITING;
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set or the hold count bumped.  Note that we must block
 * without holding a ref, meaning that the caller must ensure that (p)
 * remains valid through some other interlock (typically on its parent
 * process's p_token).
 *
 * Zero is returned on success.
 * The hold count will be incremented and the serialization flag acquired.
 * Note that serialization is only against other pholdzomb() calls, not
 * against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
		return(0);

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_ZOMB) == 0) {
			/* flag free: take it and a hold atomically */
			n = (o + 1) | PLOCK_ZOMB;
			if (atomic_cmpset_int(&p->p_lock, o, n))
				return(0);
		} else {
			/* another reaper owns the flag, block and fail */
			KKASSERT((o & PLOCK_MASK) > 0);
			n = o | PLOCK_WAITING;
			tsleep_interlock(&p->p_lock, 0);
			if (atomic_cmpset_int(&p->p_lock, o, n)) {
				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
				/* (p) can be ripped out at this point */
				return(1);
			}
		}
	}
}

/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prelezomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
		return;

	/*
	 * Slow path
	 */
	KKASSERT(p->p_lock & PLOCK_ZOMB);
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Is p an inferior of the current process?
 *
 * No requirements.
356 */ 357 int 358 inferior(struct proc *p) 359 { 360 struct proc *p2; 361 362 PHOLD(p); 363 lwkt_gettoken_shared(&p->p_token); 364 while (p != curproc) { 365 if (p->p_pid == 0) { 366 lwkt_reltoken(&p->p_token); 367 return (0); 368 } 369 p2 = p->p_pptr; 370 PHOLD(p2); 371 lwkt_reltoken(&p->p_token); 372 PRELE(p); 373 lwkt_gettoken_shared(&p2->p_token); 374 p = p2; 375 } 376 lwkt_reltoken(&p->p_token); 377 PRELE(p); 378 379 return (1); 380 } 381 382 /* 383 * Locate a process by number. The returned process will be referenced and 384 * must be released with PRELE(). 385 * 386 * No requirements. 387 */ 388 struct proc * 389 pfind(pid_t pid) 390 { 391 struct proc *p = curproc; 392 int n; 393 394 /* 395 * Shortcut the current process 396 */ 397 if (p && p->p_pid == pid) { 398 PHOLD(p); 399 return (p); 400 } 401 402 /* 403 * Otherwise find it in the hash table. 404 */ 405 n = ALLPROC_HASH(pid); 406 407 lwkt_gettoken_shared(&proc_tokens[n]); 408 LIST_FOREACH(p, &allprocs[n], p_list) { 409 if (p->p_stat == SZOMB) 410 continue; 411 if (p->p_pid == pid) { 412 PHOLD(p); 413 lwkt_reltoken(&proc_tokens[n]); 414 return (p); 415 } 416 } 417 lwkt_reltoken(&proc_tokens[n]); 418 419 return (NULL); 420 } 421 422 /* 423 * Locate a process by number. The returned process is NOT referenced. 424 * The result will not be stable and is typically only used to validate 425 * against a process that the caller has in-hand. 426 * 427 * No requirements. 428 */ 429 struct proc * 430 pfindn(pid_t pid) 431 { 432 struct proc *p = curproc; 433 int n; 434 435 /* 436 * Shortcut the current process 437 */ 438 if (p && p->p_pid == pid) 439 return (p); 440 441 /* 442 * Otherwise find it in the hash table. 
443 */ 444 n = ALLPROC_HASH(pid); 445 446 lwkt_gettoken_shared(&proc_tokens[n]); 447 LIST_FOREACH(p, &allprocs[n], p_list) { 448 if (p->p_stat == SZOMB) 449 continue; 450 if (p->p_pid == pid) { 451 lwkt_reltoken(&proc_tokens[n]); 452 return (p); 453 } 454 } 455 lwkt_reltoken(&proc_tokens[n]); 456 457 return (NULL); 458 } 459 460 /* 461 * Locate a process on the zombie list. Return a process or NULL. 462 * The returned process will be referenced and the caller must release 463 * it with PRELE(). 464 * 465 * No other requirements. 466 */ 467 struct proc * 468 zpfind(pid_t pid) 469 { 470 struct proc *p = curproc; 471 int n; 472 473 /* 474 * Shortcut the current process 475 */ 476 if (p && p->p_pid == pid) { 477 PHOLD(p); 478 return (p); 479 } 480 481 /* 482 * Otherwise find it in the hash table. 483 */ 484 n = ALLPROC_HASH(pid); 485 486 lwkt_gettoken_shared(&proc_tokens[n]); 487 LIST_FOREACH(p, &allprocs[n], p_list) { 488 if (p->p_stat != SZOMB) 489 continue; 490 if (p->p_pid == pid) { 491 PHOLD(p); 492 lwkt_reltoken(&proc_tokens[n]); 493 return (p); 494 } 495 } 496 lwkt_reltoken(&proc_tokens[n]); 497 498 return (NULL); 499 } 500 501 502 void 503 pgref(struct pgrp *pgrp) 504 { 505 refcount_acquire(&pgrp->pg_refs); 506 } 507 508 void 509 pgrel(struct pgrp *pgrp) 510 { 511 int count; 512 int n; 513 514 n = PGRP_HASH(pgrp->pg_id); 515 for (;;) { 516 count = pgrp->pg_refs; 517 cpu_ccfence(); 518 KKASSERT(count > 0); 519 if (count == 1) { 520 lwkt_gettoken(&proc_tokens[n]); 521 if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0)) 522 break; 523 lwkt_reltoken(&proc_tokens[n]); 524 /* retry */ 525 } else { 526 if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1)) 527 return; 528 /* retry */ 529 } 530 } 531 532 /* 533 * Successful 1->0 transition, pghash_spin is held. 534 */ 535 LIST_REMOVE(pgrp, pg_list); 536 537 /* 538 * Reset any sigio structures pointing to us as a result of 539 * F_SETOWN with our pgid. 
540 */ 541 funsetownlst(&pgrp->pg_sigiolst); 542 543 if (pgrp->pg_session->s_ttyp != NULL && 544 pgrp->pg_session->s_ttyp->t_pgrp == pgrp) { 545 pgrp->pg_session->s_ttyp->t_pgrp = NULL; 546 } 547 lwkt_reltoken(&proc_tokens[n]); 548 549 sess_rele(pgrp->pg_session); 550 kfree(pgrp, M_PGRP); 551 } 552 553 /* 554 * Locate a process group by number. The returned process group will be 555 * referenced w/pgref() and must be released with pgrel() (or assigned 556 * somewhere if you wish to keep the reference). 557 * 558 * No requirements. 559 */ 560 struct pgrp * 561 pgfind(pid_t pgid) 562 { 563 struct pgrp *pgrp; 564 int n; 565 566 n = PGRP_HASH(pgid); 567 lwkt_gettoken_shared(&proc_tokens[n]); 568 569 LIST_FOREACH(pgrp, &allpgrps[n], pg_list) { 570 if (pgrp->pg_id == pgid) { 571 refcount_acquire(&pgrp->pg_refs); 572 lwkt_reltoken(&proc_tokens[n]); 573 return (pgrp); 574 } 575 } 576 lwkt_reltoken(&proc_tokens[n]); 577 return (NULL); 578 } 579 580 /* 581 * Move p to a new or existing process group (and session) 582 * 583 * No requirements. 
584 */ 585 int 586 enterpgrp(struct proc *p, pid_t pgid, int mksess) 587 { 588 struct pgrp *pgrp; 589 struct pgrp *opgrp; 590 int error; 591 592 pgrp = pgfind(pgid); 593 594 KASSERT(pgrp == NULL || !mksess, 595 ("enterpgrp: setsid into non-empty pgrp")); 596 KASSERT(!SESS_LEADER(p), 597 ("enterpgrp: session leader attempted setpgrp")); 598 599 if (pgrp == NULL) { 600 pid_t savepid = p->p_pid; 601 struct proc *np; 602 int n; 603 604 /* 605 * new process group 606 */ 607 KASSERT(p->p_pid == pgid, 608 ("enterpgrp: new pgrp and pid != pgid")); 609 pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO); 610 pgrp->pg_id = pgid; 611 LIST_INIT(&pgrp->pg_members); 612 pgrp->pg_jobc = 0; 613 SLIST_INIT(&pgrp->pg_sigiolst); 614 lwkt_token_init(&pgrp->pg_token, "pgrp_token"); 615 refcount_init(&pgrp->pg_refs, 1); 616 lockinit(&pgrp->pg_lock, "pgwt", 0, 0); 617 618 n = PGRP_HASH(pgid); 619 620 if ((np = pfindn(savepid)) == NULL || np != p) { 621 lwkt_reltoken(&proc_tokens[n]); 622 error = ESRCH; 623 kfree(pgrp, M_PGRP); 624 goto fatal; 625 } 626 627 lwkt_gettoken(&proc_tokens[n]); 628 if (mksess) { 629 struct session *sess; 630 631 /* 632 * new session 633 */ 634 sess = kmalloc(sizeof(struct session), M_SESSION, 635 M_WAITOK | M_ZERO); 636 lwkt_gettoken(&p->p_token); 637 sess->s_leader = p; 638 sess->s_sid = p->p_pid; 639 sess->s_count = 1; 640 sess->s_ttyvp = NULL; 641 sess->s_ttyp = NULL; 642 bcopy(p->p_session->s_login, sess->s_login, 643 sizeof(sess->s_login)); 644 pgrp->pg_session = sess; 645 KASSERT(p == curproc, 646 ("enterpgrp: mksession and p != curproc")); 647 p->p_flags &= ~P_CONTROLT; 648 LIST_INSERT_HEAD(&allsessn[n], sess, s_list); 649 lwkt_reltoken(&p->p_token); 650 } else { 651 lwkt_gettoken(&p->p_token); 652 pgrp->pg_session = p->p_session; 653 sess_hold(pgrp->pg_session); 654 lwkt_reltoken(&p->p_token); 655 } 656 LIST_INSERT_HEAD(&allpgrps[n], pgrp, pg_list); 657 658 lwkt_reltoken(&proc_tokens[n]); 659 } else if (pgrp == p->p_pgrp) { 660 pgrel(pgrp); 
661 goto done; 662 } /* else pgfind() referenced the pgrp */ 663 664 lwkt_gettoken(&pgrp->pg_token); 665 lwkt_gettoken(&p->p_token); 666 667 /* 668 * Replace p->p_pgrp, handling any races that occur. 669 */ 670 while ((opgrp = p->p_pgrp) != NULL) { 671 pgref(opgrp); 672 lwkt_gettoken(&opgrp->pg_token); 673 if (opgrp != p->p_pgrp) { 674 lwkt_reltoken(&opgrp->pg_token); 675 pgrel(opgrp); 676 continue; 677 } 678 LIST_REMOVE(p, p_pglist); 679 break; 680 } 681 p->p_pgrp = pgrp; 682 LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist); 683 684 /* 685 * Adjust eligibility of affected pgrps to participate in job control. 686 * Increment eligibility counts before decrementing, otherwise we 687 * could reach 0 spuriously during the first call. 688 */ 689 fixjobc(p, pgrp, 1); 690 if (opgrp) { 691 fixjobc(p, opgrp, 0); 692 lwkt_reltoken(&opgrp->pg_token); 693 pgrel(opgrp); /* manual pgref */ 694 pgrel(opgrp); /* p->p_pgrp ref */ 695 } 696 lwkt_reltoken(&p->p_token); 697 lwkt_reltoken(&pgrp->pg_token); 698 done: 699 error = 0; 700 fatal: 701 return (error); 702 } 703 704 /* 705 * Remove process from process group 706 * 707 * No requirements. 708 */ 709 int 710 leavepgrp(struct proc *p) 711 { 712 struct pgrp *pg = p->p_pgrp; 713 714 lwkt_gettoken(&p->p_token); 715 while ((pg = p->p_pgrp) != NULL) { 716 pgref(pg); 717 lwkt_gettoken(&pg->pg_token); 718 if (p->p_pgrp != pg) { 719 lwkt_reltoken(&pg->pg_token); 720 pgrel(pg); 721 continue; 722 } 723 p->p_pgrp = NULL; 724 LIST_REMOVE(p, p_pglist); 725 lwkt_reltoken(&pg->pg_token); 726 pgrel(pg); /* manual pgref */ 727 pgrel(pg); /* p->p_pgrp ref */ 728 break; 729 } 730 lwkt_reltoken(&p->p_token); 731 732 return (0); 733 } 734 735 /* 736 * Adjust the ref count on a session structure. When the ref count falls to 737 * zero the tty is disassociated from the session and the session structure 738 * is freed. Note that tty assocation is not itself ref-counted. 739 * 740 * No requirements. 
 */
void
sess_hold(struct session *sp)
{
	atomic_add_int(&sp->s_count, 1);
}

/*
 * Release a reference on a session.  On the 1->0 transition the session
 * is unhashed, detached from its tty, and freed.  tty_token must be
 * acquired before the bucket token (lock order).
 *
 * No requirements.
 */
void
sess_rele(struct session *sess)
{
	struct tty *tp;
	int count;
	int n;

	n = SESS_HASH(sess->s_sid);
	for (;;) {
		count = sess->s_count;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			/*
			 * Potential final release; take both tokens first
			 * so the 1->0 transition, unhash, and tty detach
			 * are atomic vs lookups.
			 */
			lwkt_gettoken(&tty_token);
			lwkt_gettoken(&proc_tokens[n]);
			if (atomic_cmpset_int(&sess->s_count, 1, 0))
				break;
			lwkt_reltoken(&proc_tokens[n]);
			lwkt_reltoken(&tty_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&sess->s_count, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition and tty_token is held.
	 */
	LIST_REMOVE(sess, s_list);

	if (sess->s_ttyp && sess->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
		/* FULL CLOSE, see ttyclearsession() */
		KKASSERT(sess->s_ttyp->t_session == sess);
		sess->s_ttyp->t_session = NULL;
#else
		/* HALF CLOSE, see ttyclearsession() */
		if (sess->s_ttyp->t_session == sess)
			sess->s_ttyp->t_session = NULL;
#endif
	}
	if ((tp = sess->s_ttyp) != NULL) {
		sess->s_ttyp = NULL;
		ttyunhold(tp);
	}
	lwkt_reltoken(&proc_tokens[n]);
	lwkt_reltoken(&tty_token);

	kfree(sess, M_SESSION);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *np;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	lwkt_gettoken(&p->p_token);	/* p_children scan */
	lwkt_gettoken(&pgrp->pg_token);

	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(np, &p->p_children, p_sibling) {
		PHOLD(np);
		lwkt_gettoken(&np->p_token);
		if ((hispgrp = np->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    np->p_stat != SZOMB) {
			pgref(hispgrp);
			lwkt_gettoken(&hispgrp->pg_token);
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
			lwkt_reltoken(&hispgrp->pg_token);
			pgrel(hispgrp);
		}
		lwkt_reltoken(&np->p_token);
		PRELE(np);
	}
	KKASSERT(pgrp->pg_refs > 0);
	lwkt_reltoken(&pgrp->pg_token);
	lwkt_reltoken(&p->p_token);
}

/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang-up all process in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	/*
	 * The outer scan looks for any stopped member; if one exists the
	 * inner scan signals every member and we return (the iterator is
	 * deliberately reused since we never resume the outer loop).
	 */
	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				ksignal(p, SIGHUP);
				ksignal(p, SIGCONT);
			}
			return;
		}
	}
}

/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
	int random_offset;

	if ((random_offset = randompid) != 0) {
		read_random(&random_offset, sizeof(random_offset));
		/* mask off the sign bit before taking the modulus */
		random_offset = (random_offset & 0x7FFFFFFF) % randompid;
	}
	proc_makepid(p, random_offset);
}

/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * p_pid is assigned and the process is added to the allproc hash table
 */
static
void
proc_makepid(struct proc *p, int random_offset)
{
	static pid_t nextpid;	/* heuristic, allowed to race */
	struct pgrp *pg;
	struct proc *ps;
	struct session *sess;
	pid_t base;
	int n;

	/*
	 * Calculate a hash index and find an unused process id within
	 * the table, looping if we cannot find one.
	 *
	 * Candidates are advanced by ALLPROC_HSIZE on collision, which
	 * keeps them in the same hash bucket so one token covers the
	 * whole search.  NOTE(review): the collision scans continue
	 * forward rather than restarting, so an earlier chain entry
	 * could in principle match the bumped candidate — presumably
	 * acceptable because pids are checked against all three tables
	 * and the window is tiny; verify against upstream.
	 */
	if (random_offset)
		atomic_add_int(&nextpid, random_offset);
retry:
	base = atomic_fetchadd_int(&nextpid, 1) + 1;
	if (base >= PID_MAX) {
		base = base % PID_MAX;
		if (base < 100)		/* low pids are reserved */
			base += 100;
	}
	n = ALLPROC_HASH(base);
	lwkt_gettoken(&proc_tokens[n]);

	/* candidate must not collide with an existing pid ... */
	LIST_FOREACH(ps, &allprocs[n], p_list) {
		if (ps->p_pid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&proc_tokens[n]);
				goto retry;
			}
		}
	}
	/* ... nor with a process group id ... */
	LIST_FOREACH(pg, &allpgrps[n], pg_list) {
		if (pg->pg_id == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&proc_tokens[n]);
				goto retry;
			}
		}
	}
	/* ... nor with a session id. */
	LIST_FOREACH(sess, &allsessn[n], s_list) {
		if (sess->s_sid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&proc_tokens[n]);
				goto retry;
			}
		}
	}

	/*
	 * Assign the pid and insert the process.
	 */
	p->p_pid = base;
	LIST_INSERT_HEAD(&allprocs[n], p, p_list);
	lwkt_reltoken(&proc_tokens[n]);
}

/*
 * Called from exit1 to place the process into a zombie state.
 * The process is removed from the pid hash and p_stat is set
 * to SZOMB.  Normal pfind[n]() calls will not find it any more.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
	int n;

	n = ALLPROC_HASH(p->p_pid);
	PSTALL(p, "reap1", 0);
	lwkt_gettoken(&proc_tokens[n]);

	/* re-stall: a hold may have been acquired while we blocked */
	PSTALL(p, "reap1a", 0);
	p->p_stat = SZOMB;

	lwkt_reltoken(&proc_tokens[n]);
	dsched_exit_proc(p);
}

/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_remove_zombie(struct proc *p)
{
	int n;

	n = ALLPROC_HASH(p->p_pid);

	PSTALL(p, "reap2", 0);
	lwkt_gettoken(&proc_tokens[n]);
	PSTALL(p, "reap2a", 0);
	LIST_REMOVE(p, p_list);		/* from master list */
	LIST_REMOVE(p, p_sibling);	/* and from sibling list */
	p->p_pptr = NULL;
	lwkt_reltoken(&proc_tokens[n]);
}

/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;

	/* deferred vnode reclamation requested for this lwp */
	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
		allocvnode_gc();
	}
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0, NULL);
		lwkt_reltoken(&p->p_token);     /* NOT REACHED */
	}
}

/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 * nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
			allocvnode_gc();
		}
	}
}

/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 * Zombie procs are skipped.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	int limit = nprocs + ncpus;
	struct proc *p;
	int r;
	int n;

	/*
	 * proc_tokens[n] protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_FIRST(&allprocs[n]) == NULL)
			continue;
		lwkt_gettoken(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
			if (--limit < 0)
				break;
		}
		lwkt_reltoken(&proc_tokens[n]);

		/*
		 * Check if asked to stop early: (p) is non-NULL iff the
		 * inner LIST_FOREACH was exited via break.
		 */
		if (p)
			break;
	}
}

/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
{
	struct proc *p;
	struct lwp *lp;
	int r = 0;
	int n;

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_FIRST(&allprocs[n]) == NULL)
			continue;
		lwkt_gettoken(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			FOREACH_LWP_IN_PROC(lp, p) {
				LWPHOLD(lp);
				r = callback(lp, data);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&proc_tokens[n]);

		/*
		 * Asked to exit early (non-NULL (p) means the inner loop
		 * was exited via break).
		 */
		if (p)
			break;
	}
}

/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * No requirements.
 * The callback is made with the process held and proc_token held.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;
	int n;

	/*
	 * proc_tokens[n] protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_FIRST(&allprocs[n]) == NULL)
			continue;
		lwkt_gettoken(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			/* zombies only; live processes are skipped */
			if (p->p_stat != SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&proc_tokens[n]);

		/*
		 * Check if asked to stop early: (p) is non-NULL iff the
		 * inner LIST_FOREACH was exited via break.
		 */
		if (p)
			break;
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only: dump all process groups and their members from the
 * in-kernel debugger.
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	struct pgrp *pgrp;
	struct proc *p;
	int i;

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		if (LIST_EMPTY(&allpgrps[i]))
			continue;
		kprintf("\tindx %d\n", i);
		LIST_FOREACH(pgrp, &allpgrps[i], pg_list) {
			kprintf("\tpgrp %p, pgid %ld, sess %p, "
				"sesscnt %d, mem %p\n",
				(void *)pgrp, (long)pgrp->pg_id,
				(void *)pgrp->pg_session,
				pgrp->pg_session->s_count,
				(void *)LIST_FIRST(&pgrp->pg_members));
			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
				kprintf("\t\tpid %ld addr %p pgrp %p\n",
					(long)p->p_pid, (void *)p,
					(void *)p->p_pgrp);
			}
		}
	}
}
#endif /* DDB */

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	struct lwp *lp;
	int skp = 0, had_output = 0;
	int error;

	bzero(&ki, sizeof(ki));
	lwkt_gettoken_shared(&p->p_token);
	fill_kinfo_proc(p, &ki);
	/*
	 * Without KERN_PROC_FLAG_LWP only one entry (the first lwp) is
	 * emitted per process; with it, one entry per lwp.
	 */
	if ((flags & KERN_PROC_FLAG_LWP) == 0)
		skp = 1;
	error = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		fill_kinfo_lwp(lp, &ki.kp_lwp);
		had_output = 1;
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
		LWPRELE(lp);
		if (error)
			break;
		if (skp)
			break;
	}
	lwkt_reltoken(&p->p_token);
	/* We need to output at least the proc, even if there is no lwp. */
	if (had_output == 0) {
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
	}
	return (error);
}

/*
 * Emit a kinfo_proc entry for a pure kernel thread (no associated proc).
 *
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
{
	struct kinfo_proc ki;
	int error;

	fill_kinfo_proc_kthread(td, &ki);
	error = SYSCTL_OUT(req, &ki, sizeof(ki));
	if (error)
		return error;
	return(0);
}

/*
 * Handler for the kern.proc.* sysctl tree (all/pid/pgrp/tty/uid/ruid and
 * their _LWP variants).  Scans the allproc hash and then, unless filtered
 * or restricted, the per-cpu kernel thread lists.
 *
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int oid = oidp->oid_number;
	u_int namelen = arg2;
	struct proc *p;
	struct thread *td;
	struct thread *marker;
	int flags = 0;
	int error = 0;
	int n;
	int origcpu;
	struct ucred *cr1 = curproc->p_ucred;

	/* The LWP flag is encoded in the oid number itself */
	flags = oid & KERN_PROC_FLAGMASK;
	oid &= ~KERN_PROC_FLAGMASK;

	if ((oid == KERN_PROC_ALL && namelen != 0) ||
	    (oid != KERN_PROC_ALL && namelen != 1)) {
		return (EINVAL);
	}

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	if (oid == KERN_PROC_PID) {
		p = pfind((pid_t)name[0]);	/* returns held process */
		if (p) {
			if (PRISON_CHECK(cr1, p->p_ucred))
				error = sysctl_out_proc(p, req, flags);
			PRELE(p);
		}
		goto post_threads;
	}
	p = NULL;

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			goto post_threads;
	}

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		if (LIST_EMPTY(&allprocs[n]))
			continue;
		lwkt_gettoken_shared(&proc_tokens[n]);
		LIST_FOREACH(p, &allprocs[n], p_list) {
			/*
			 * Show a user only their processes.
			 */
			if ((!ps_showallprocs) && p_trespass(cr1, p->p_ucred))
				continue;
			/*
			 * Skip embryonic processes.
			 */
			if (p->p_stat == SIDL)
				continue;
			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid) {
			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0])
					continue;
				break;

			case KERN_PROC_TTY:
				if ((p->p_flags & P_CONTROLT) == 0 ||
				    p->p_session == NULL ||
				    p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
					(udev_t)name[0])
					continue;
				break;

			case KERN_PROC_UID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_uid != (uid_t)name[0])
					continue;
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_ruid != (uid_t)name[0])
					continue;
				break;
			}

			if (!PRISON_CHECK(cr1, p->p_ucred))
				continue;
			PHOLD(p);
			error = sysctl_out_proc(p, req, flags);
			PRELE(p);
			if (error) {
				lwkt_reltoken(&proc_tokens[n]);
				goto post_threads;
			}
		}
		lwkt_reltoken(&proc_tokens[n]);
	}

	/*
	 * Iterate over all active cpus and scan their thread list.  Start
	 * with the next logical cpu and end with our original cpu.  We
	 * migrate our own thread to each target cpu in order to safely scan
	 * its thread list.  In the last loop we migrate back to our original
	 * cpu.
	 */
	origcpu = mycpu->gd_cpuid;
	if (!ps_showallthreads || jailed(cr1))
		goto post_threads;

	/*
	 * A marker thread is inserted into each cpu's tdallq so our
	 * position survives dropping the critical section mid-scan.
	 */
	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
	marker->td_flags = TDF_MARKER;
	error = 0;

	for (n = 1; n <= ncpus; ++n) {
		globaldata_t rgd;
		int nid;

		nid = (origcpu + n) % ncpus;
		if ((smp_active_mask & CPUMASK(nid)) == 0)
			continue;
		rgd = globaldata_find(nid);
		lwkt_setcpu_self(rgd);

		crit_enter();
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
			/* Advance the marker past td before processing it */
			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
			TAILQ_INSERT_BEFORE(td, marker, td_allq);
			if (td->td_flags & TDF_MARKER)
				continue;
			/* Threads with a proc were reported in the scan above */
			if (td->td_proc)
				continue;

			lwkt_hold(td);
			crit_exit();

			switch (oid) {
			case KERN_PROC_PGRP:
			case KERN_PROC_TTY:
			case KERN_PROC_UID:
			case KERN_PROC_RUID:
				/* kernel threads match none of these filters */
				break;
			default:
				error = sysctl_out_proc_kthread(td, req);
				break;
			}
			lwkt_rele(td);
			crit_enter();
			if (error)
				break;
		}
		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
		crit_exit();

		if (error)
			break;
	}

	/*
	 * Userland scheduler expects us to return on the same cpu we
	 * started on.
	 */
	if (mycpu->gd_cpuid != origcpu)
		lwkt_setcpu_self(globaldata_find(origcpu));

	kfree(marker, M_TEMP);

post_threads:
	return (error);
}

/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.
 * It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct pargs *opa;
	struct pargs *pa;
	int error = 0;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);	/* returns held process */
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	/* Silently return nothing if we may not inspect the target */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	/* Only a process itself may replace its argument list */
	if (req->newptr && curproc != p) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr && (pa = p->p_args) != NULL) {
		/* Extra ref keeps pa alive across the copyout */
		refcount_acquire(&pa->ar_ref);
		error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
		if (refcount_release(&pa->ar_ref))
			kfree(pa, M_PARGS);
	}
	if (req->newptr == NULL)
		goto done;

	/* Oversized titles are silently ignored (not an error) */
	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
		goto done;
	}

	pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = req->newlen;
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (error) {
		kfree(pa, M_PARGS);
		goto done;
	}


	/*
	 * Replace p_args with the new pa.  p_args may have previously
	 * been NULL.
	 */
	opa = p->p_args;
	p->p_args = pa;

	if (opa) {
		KKASSERT(opa->ar_ref > 0);
		if (refcount_release(&opa->ar_ref)) {
			kfree(opa, M_PARGS);
			/* opa = NULL; */
		}
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

/*
 * Retrieve the current working directory path of a process.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int error = 0;
	char *fullpath, *freepath;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);	/* returns held process */
	if (p == NULL)
		goto done;
	lwkt_gettoken_shared(&p->p_token);

	/*
	 * If we are not allowed to see other args, we certainly shouldn't
	 * get the cwd either.  Also check the usual trespassing.
	 */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
		struct nchandle nch;

		cache_copy(&p->p_fd->fd_ncdir, &nch);
		error = cache_fullpath(p, &nch, NULL,
				       &fullpath, &freepath, 0);
		cache_drop(&nch);
		if (error)
			goto done;
		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
		kfree(freepath, M_TEMP);
	}

done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
1605 */ 1606 static int 1607 sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS) 1608 { 1609 pid_t *pidp = (pid_t *)arg1; 1610 unsigned int arglen = arg2; 1611 struct proc *p; 1612 struct vnode *vp; 1613 char *retbuf, *freebuf; 1614 int error; 1615 1616 if (arglen != 1) 1617 return (EINVAL); 1618 if (*pidp == -1) { /* -1 means this process */ 1619 p = curproc; 1620 } else { 1621 p = pfind(*pidp); 1622 if (p == NULL) 1623 return (ESRCH); 1624 } 1625 1626 vp = p->p_textvp; 1627 if (vp == NULL) { 1628 return (0); 1629 } 1630 vref(vp); 1631 error = vn_fullpath(p, vp, &retbuf, &freebuf, 0); 1632 vrele(vp); 1633 if (error) 1634 return (error); 1635 error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1); 1636 kfree(freebuf, M_TEMP); 1637 return (error); 1638 } 1639 1640 SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table"); 1641 1642 SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT, 1643 0, 0, sysctl_kern_proc, "S,proc", "Return entire process table"); 1644 1645 SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD, 1646 sysctl_kern_proc, "Process table"); 1647 1648 SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD, 1649 sysctl_kern_proc, "Process table"); 1650 1651 SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD, 1652 sysctl_kern_proc, "Process table"); 1653 1654 SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD, 1655 sysctl_kern_proc, "Process table"); 1656 1657 SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD, 1658 sysctl_kern_proc, "Process table"); 1659 1660 SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp, CTLFLAG_RD, 1661 sysctl_kern_proc, "Process table"); 1662 1663 SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp, CTLFLAG_RD, 1664 sysctl_kern_proc, "Process table"); 1665 1666 SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp, CTLFLAG_RD, 1667 sysctl_kern_proc, "Process table"); 1668 1669 SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | 
KERN_PROC_FLAG_LWP), uid_lwp, CTLFLAG_RD, 1670 sysctl_kern_proc, "Process table"); 1671 1672 SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp, CTLFLAG_RD, 1673 sysctl_kern_proc, "Process table"); 1674 1675 SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp, CTLFLAG_RD, 1676 sysctl_kern_proc, "Process table"); 1677 1678 SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY, 1679 sysctl_kern_proc_args, "Process argument list"); 1680 1681 SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD | CTLFLAG_ANYBODY, 1682 sysctl_kern_proc_cwd, "Process argument list"); 1683 1684 static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD, 1685 sysctl_kern_proc_pathname, "Process executable path"); 1686