/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <sys/random.h>
#include <sys/exec.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <sys/kinfo.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>

/*
 * Hash table size must be a power of two and is not currently dynamically
 * sized.  There is a trade-off between the linear scans which must iterate
 * all HSIZE elements and the number of elements which might accumulate
 * within each hash chain.
 */
#define ALLPROC_HSIZE	256
#define ALLPROC_HMASK	(ALLPROC_HSIZE - 1)
#define ALLPROC_HASH(pid)	(pid & ALLPROC_HMASK)
#define PGRP_HASH(pid)	(pid & ALLPROC_HMASK)
#define SESS_HASH(pid)	(pid & ALLPROC_HMASK)

/*
 * pid_doms[] management, used to control how quickly a PID can be recycled.
 * Must be a multiple of ALLPROC_HSIZE for the proc_makepid() inner loops.
 *
 * WARNING! PIDDOM_DELAY should not be defined > 20 or so unless you change
 *	    the array from int8_t's to int16_t's.
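 *
 * Sizing illustration (derived from the constants below, not a new
 * guarantee): with PIDDOM_SCALE == PIDDOM_COUNT the array works out to
 * roughly one byte per possible PID, rounded down to a multiple of
 * ALLPROC_HSIZE, which matches the ~1MB figure noted at pid_doms[].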
 */
#define PIDDOM_COUNT	10	/* 10 pids per domain - reduce array size */
#define PIDDOM_DELAY	10	/* min 10 seconds after exit before reuse */
#define PIDDOM_SCALE	10	/* (10,000*SCALE)/sec performance guarantee */
#define PIDSEL_DOMAINS	rounddown(PID_MAX * PIDDOM_SCALE / PIDDOM_COUNT, \
				  ALLPROC_HSIZE)

/* Used by libkvm */
int allproc_hsize = ALLPROC_HSIZE;

LIST_HEAD(pidhashhead, proc);

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
	   &ps_showallprocs, 0,
	   "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
	   &ps_showallthreads, 0,
	   "Unprivileged processes can see kernel threads");
static u_int pid_domain_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_domain_skips, CTLFLAG_RW,
	    &pid_domain_skips, 0,
	    "Number of pid_doms[] domains skipped");
static u_int pid_inner_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_inner_skips, CTLFLAG_RW,
	    &pid_inner_skips, 0,
	    "Number of pid_doms[] inner-loop skips");

static void orphanpg(struct pgrp *pg);
static void proc_makepid(struct proc *p, int random_offset);

/*
 * Process related lists (for proc_token, allproc, allpgrp, and allsess)
 */
typedef struct procglob procglob_t;

static procglob_t	procglob[ALLPROC_HSIZE];

/*
 * We try our best to avoid recycling a PID too quickly.  We do this by
 * storing (uint8_t)time_second in the related pid domain on-reap and then
 * using that to skip-over the domain on-allocate.
 *
 * This array has to be fairly large to support a high fork/exec rate.
 * A ~100,000 entry array will support a 10-second reuse latency at
 * 10,000 execs/second, worst case.  Best-case multiply by PIDDOM_COUNT
 * (approximately 100,000 execs/second).
 *
 * Currently we allocate around a megabyte, making the worst-case fork
 * rate around 100,000/second.
 */
static uint8_t *pid_doms;

/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static __inline
struct ucred *
pcredcache(struct ucred *cr, struct proc *p)
{
	if (cr != p->p_ucred) {
		if (cr)
			crfree(cr);
		spin_lock(&p->p_spin);
		if ((cr = p->p_ucred) != NULL)
			crhold(cr);
		spin_unlock(&p->p_spin);
	}
	return cr;
}

/*
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
	    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

/*
 * Initialize global process hashing structures.
 *
 * These functions are ONLY called from the low level boot code and do
 * not lock their operations.
 */
void
procinit(void)
{
	u_long i;

	/*
	 * Allocate dynamically.  This array can be large (~1MB) so don't
	 * waste boot loader space.
	 */
	pid_doms = kmalloc(sizeof(pid_doms[0]) * PIDSEL_DOMAINS,
			   M_PROC, M_WAITOK | M_ZERO);

	/*
	 * Avoid unnecessary stalls due to pid_doms[] values all being
	 * the same.  Make sure that the allocation of pid 1 and pid 2
	 * succeeds.
	 */
	for (i = 0; i < PIDSEL_DOMAINS; ++i)
		pid_doms[i] = (int8_t)i - (int8_t)(PIDDOM_DELAY + 1);

	/*
	 * Other misc init.
	 */
	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		procglob_t *prg = &procglob[i];

		LIST_INIT(&prg->allproc);
		LIST_INIT(&prg->allsess);
		LIST_INIT(&prg->allpgrp);
		lwkt_token_init(&prg->proc_token, "allproc");
	}
	uihashinit();
}

void
procinsertinit(struct proc *p)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(p->p_pid)].allproc,
			 p, p_list);
}

void
pgrpinsertinit(struct pgrp *pg)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(pg->pg_id)].allpgrp,
			 pg, pg_list);
}

void
sessinsertinit(struct session *sess)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(sess->s_sid)].allsess,
			 sess, s_list);
}

/*
 * Process hold/release support functions.  Called via the PHOLD(),
 * PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
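 *
 * Usage sketch (illustrative restatement of the macros, not code taken
 * from this file):
 *
 *	PHOLD(p);		-- p cannot be ripped out while held
 *	... examine or operate on p ...
 *	PRELE(p);		-- (p) may be instantly invalid afterwards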
 */
#define PLOCK_ZOMB	0x20000000
#define PLOCK_WAITING	0x40000000
#define PLOCK_MASK	0x1FFFFFFF

void
pstall(struct proc *p, const char *wmesg, int count)
{
	int o;
	int n;

	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_MASK) <= count)
			break;
		n = o | PLOCK_WAITING;
		tsleep_interlock(&p->p_lock, 0);

		/*
		 * If someone is trying to single-step the process during
		 * an exec or an exit they can deadlock us because procfs
		 * sleeps with the process held.
		 */
		if (p->p_stops) {
			if (p->p_flags & P_INEXEC) {
				wakeup(&p->p_stype);
			} else if (p->p_flags & P_POSTEXIT) {
				spin_lock(&p->p_spin);
				p->p_stops = 0;
				p->p_step = 0;
				spin_unlock(&p->p_spin);
				wakeup(&p->p_stype);
			}
		}

		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
		}
	}
}

void
phold(struct proc *p)
{
	atomic_add_int(&p->p_lock, 1);
}

/*
 * WARNING! On last release (p) can become instantly invalid due to
 *	    MP races.
 */
void
prele(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 1, 0))
		return;

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~PLOCK_WAITING;
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that (p) may
 * not be valid in this case if the caller does not have some other
 * reference on (p).
 *
 * This function does not block on other PHOLD()s, only on other
 * PHOLDZOMB()s.
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
		return(0);

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_ZOMB) == 0) {
			n = (o + 1) | PLOCK_ZOMB;
			if (atomic_cmpset_int(&p->p_lock, o, n))
				return(0);
		} else {
			KKASSERT((o & PLOCK_MASK) > 0);
			n = o | PLOCK_WAITING;
			tsleep_interlock(&p->p_lock, 0);
			if (atomic_cmpset_int(&p->p_lock, o, n)) {
				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
				/* (p) can be ripped out at this point */
				return(1);
			}
		}
	}
}

/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING! On last release (p) can become instantly invalid due to
 *	    MP races.
 */
void
prelezomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
		return;

	/*
	 * Slow path
	 */
	KKASSERT(p->p_lock & PLOCK_ZOMB);
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}

/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 */
int
inferior(struct proc *p)
{
	struct proc *p2;

	PHOLD(p);
	lwkt_gettoken_shared(&p->p_token);
	while (p != curproc) {
		if (p->p_pid == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);	/* balance the hold taken above */
			return (0);
		}
		p2 = p->p_pptr;
		PHOLD(p2);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		lwkt_gettoken_shared(&p2->p_token);
		p = p2;
	}
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (1);
}

/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}

/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The result will not be stable and is typically only used to validate
 * against a process that the caller has in-hand.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid)
		return (p);

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}

/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat != SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}

void
pgref(struct pgrp *pgrp)
{
	refcount_acquire(&pgrp->pg_refs);
}

void
pgrel(struct pgrp *pgrp)
{
	procglob_t *prg;
	int count;
	int n;

	n = PGRP_HASH(pgrp->pg_id);
	prg = &procglob[n];

	for (;;) {
		count = pgrp->pg_refs;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition, prg->proc_token is held.
	 */
	LIST_REMOVE(pgrp, pg_list);
	if (pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] = (uint8_t)time_second;

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	}
	lwkt_reltoken(&prg->proc_token);

	sess_rele(pgrp->pg_session);
	kfree(pgrp, M_PGRP);
}

/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;
	procglob_t *prg;
	int n;

	n = PGRP_HASH(pgid);
	prg = &procglob[n];
	lwkt_gettoken_shared(&prg->proc_token);

	LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
		if (pgrp->pg_id == pgid) {
			refcount_acquire(&pgrp->pg_refs);
			lwkt_reltoken(&prg->proc_token);
			return (pgrp);
		}
	}
	lwkt_reltoken(&prg->proc_token);
	return (NULL);
}

/*
 * Move p to a new or existing process group (and session).
 *
 * No requirements.
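 *
 * Interface illustration (hedged; the syscall wiring lives elsewhere):
 * a setsid()-style caller would use enterpgrp(p, p->p_pid, 1), while a
 * setpgid()-style caller passes mksess == 0 to join or create a plain
 * process group within its existing session.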
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *opgrp;
	int error;

	pgrp = pgfind(pgid);

	KASSERT(pgrp == NULL || !mksess,
		("enterpgrp: setsid into non-empty pgrp"));
	KASSERT(!SESS_LEADER(p),
		("enterpgrp: session leader attempted setpgrp"));

	if (pgrp == NULL) {
		pid_t savepid = p->p_pid;
		struct proc *np;
		procglob_t *prg;
		int n;

		/*
		 * new process group
		 */
		KASSERT(p->p_pid == pgid,
			("enterpgrp: new pgrp and pid != pgid"));
		pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_jobc = 0;
		SLIST_INIT(&pgrp->pg_sigiolst);
		lwkt_token_init(&pgrp->pg_token, "pgrp_token");
		refcount_init(&pgrp->pg_refs, 1);
		lockinit(&pgrp->pg_lock, "pgwt", 0, 0);

		n = PGRP_HASH(pgid);
		prg = &procglob[n];
		lwkt_gettoken(&prg->proc_token);

		if ((np = pfindn(savepid)) == NULL || np != p) {
			lwkt_reltoken(&prg->proc_token);
			error = ESRCH;
			kfree(pgrp, M_PGRP);
			goto fatal;
		}

		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = kmalloc(sizeof(struct session), M_SESSION,
				       M_WAITOK | M_ZERO);
			lwkt_gettoken(&p->p_token);
			sess->s_prg = prg;
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			bcopy(p->p_session->s_login, sess->s_login,
			      sizeof(sess->s_login));
			pgrp->pg_session = sess;
			KASSERT(p == curproc,
				("enterpgrp: mksession and p != curproc"));
			p->p_flags &= ~P_CONTROLT;
			LIST_INSERT_HEAD(&prg->allsess, sess, s_list);
			lwkt_reltoken(&p->p_token);
		} else {
			lwkt_gettoken(&p->p_token);
			pgrp->pg_session = p->p_session;
			sess_hold(pgrp->pg_session);
			lwkt_reltoken(&p->p_token);
		}
		LIST_INSERT_HEAD(&prg->allpgrp, pgrp, pg_list);

		lwkt_reltoken(&prg->proc_token);
	} else if (pgrp == p->p_pgrp) {
		pgrel(pgrp);
		goto done;
	} /* else pgfind() referenced the pgrp */

	lwkt_gettoken(&pgrp->pg_token);
	lwkt_gettoken(&p->p_token);

	/*
	 * Replace p->p_pgrp, handling any races that occur.
	 */
	while ((opgrp = p->p_pgrp) != NULL) {
		pgref(opgrp);
		lwkt_gettoken(&opgrp->pg_token);
		if (opgrp != p->p_pgrp) {
			lwkt_reltoken(&opgrp->pg_token);
			pgrel(opgrp);
			continue;
		}
		LIST_REMOVE(p, p_pglist);
		break;
	}
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	if (opgrp) {
		fixjobc(p, opgrp, 0);
		lwkt_reltoken(&opgrp->pg_token);
		pgrel(opgrp);	/* manual pgref */
		pgrel(opgrp);	/* p->p_pgrp ref */
	}
	lwkt_reltoken(&p->p_token);
	lwkt_reltoken(&pgrp->pg_token);
done:
	error = 0;
fatal:
	return (error);
}

/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
	struct pgrp *pg = p->p_pgrp;

	lwkt_gettoken(&p->p_token);
	while ((pg = p->p_pgrp) != NULL) {
		pgref(pg);
		lwkt_gettoken(&pg->pg_token);
		if (p->p_pgrp != pg) {
			lwkt_reltoken(&pg->pg_token);
			pgrel(pg);
			continue;
		}
		p->p_pgrp = NULL;
		LIST_REMOVE(p, p_pglist);
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);	/* manual pgref */
		pgrel(pg);	/* p->p_pgrp ref */
		break;
	}
	lwkt_reltoken(&p->p_token);

	return (0);
}

/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
	atomic_add_int(&sp->s_count, 1);
}

/*
 * No requirements.
 */
void
sess_rele(struct session *sess)
{
	procglob_t *prg;
	struct tty *tp;
	int count;
	int n;

	n = SESS_HASH(sess->s_sid);
	prg = &procglob[n];

	for (;;) {
		count = sess->s_count;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&sess->s_count, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&sess->s_count, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition and prg->proc_token is held.
	 */
	LIST_REMOVE(sess, s_list);
	if (pid_doms[sess->s_sid % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[sess->s_sid % PIDSEL_DOMAINS] = (uint8_t)time_second;

	if (sess->s_ttyp && sess->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
		/* FULL CLOSE, see ttyclearsession() */
		KKASSERT(sess->s_ttyp->t_session == sess);
		sess->s_ttyp->t_session = NULL;
#else
		/* HALF CLOSE, see ttyclearsession() */
		if (sess->s_ttyp->t_session == sess)
			sess->s_ttyp->t_session = NULL;
#endif
	}
	if ((tp = sess->s_ttyp) != NULL) {
		sess->s_ttyp = NULL;
		ttyunhold(tp);
	}
	lwkt_reltoken(&prg->proc_token);

	kfree(sess, M_SESSION);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 *	entering == 0 => p is leaving specified group.
 *	entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *np;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	lwkt_gettoken(&p->p_token);	/* p_children scan */
	lwkt_gettoken(&pgrp->pg_token);

	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(np, &p->p_children, p_sibling) {
		PHOLD(np);
		lwkt_gettoken(&np->p_token);
		if ((hispgrp = np->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    np->p_stat != SZOMB) {
			pgref(hispgrp);
			lwkt_gettoken(&hispgrp->pg_token);
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
			lwkt_reltoken(&hispgrp->pg_token);
			pgrel(hispgrp);
		}
		lwkt_reltoken(&np->p_token);
		PRELE(np);
	}
	KKASSERT(pgrp->pg_refs > 0);
	lwkt_reltoken(&pgrp->pg_token);
	lwkt_reltoken(&p->p_token);
}

/*
 * A process group has become orphaned; if there are any stopped processes
 * in the group, hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				ksignal(p, SIGHUP);
				ksignal(p, SIGCONT);
			}
			return;
		}
	}
}

/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
	int random_offset;

	if ((random_offset = randompid) != 0) {
		read_random(&random_offset, sizeof(random_offset));
		random_offset = (random_offset & 0x7FFFFFFF) % randompid;
	}
	proc_makepid(p, random_offset);
}

/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * p_pid is assigned and the process is added to the allproc hash table.
 *
 * WARNING! We need to allocate PIDs sequentially during early boot.
 *	    In particular, init needs to have a pid of 1.
 */
static
void
proc_makepid(struct proc *p, int random_offset)
{
	static pid_t nextpid = 1;	/* heuristic, allowed to race */
	procglob_t *prg;
	struct pgrp *pg;
	struct proc *ps;
	struct session *sess;
	pid_t base;
	int8_t delta8;
	int retries;
	int n;

	/*
	 * Select the next pid base candidate.
	 *
	 * Check for wrap-around (cycling); do not allow a pid < 100.
	 */
	retries = 0;
retry:
	base = atomic_fetchadd_int(&nextpid, 1) + random_offset;
	if (base <= 0 || base >= PID_MAX) {
		base = base % PID_MAX;
		if (base < 0)
			base = 100;
		if (base < 100)
			base += 100;
		nextpid = base;		/* reset (SMP race ok) */
	}

	/*
	 * Do not allow a base pid to be selected from a domain that has
	 * recently seen a pid/pgid/sessid reap.  Sleep a little if we looped
	 * through all available domains.
	 *
	 * WARNING: We want the early pids to be allocated linearly,
	 *	    particularly pid 1 and pid 2.
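	 *
	 * Illustration (restating the delta check just below, no new
	 * policy): a domain is considered recently-reaped when
	 *
	 *	(int8_t)time_second - (int8_t)pid_doms[base % PIDSEL_DOMAINS]
	 *
	 * falls within [0, PIDDOM_DELAY], i.e. something hashing to that
	 * domain was reaped within roughly the last PIDDOM_DELAY seconds.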
	 */
	if (++retries >= PIDSEL_DOMAINS)
		tsleep(&nextpid, 0, "makepid", 1);
	if (base >= 100) {
		delta8 = (int8_t)time_second -
			 (int8_t)pid_doms[base % PIDSEL_DOMAINS];
		if (delta8 >= 0 && delta8 <= PIDDOM_DELAY) {
			++pid_domain_skips;
			goto retry;
		}
	}

	/*
	 * Calculate a hash index and find an unused process id within
	 * the table, looping if we cannot find one.
	 *
	 * The inner loop increments by ALLPROC_HSIZE which keeps the
	 * PID at the same pid_doms[] index as well as the same hash index.
	 */
	n = ALLPROC_HASH(base);
	prg = &procglob[n];
	lwkt_gettoken(&prg->proc_token);

restart1:
	LIST_FOREACH(ps, &prg->allproc, p_list) {
		if (ps->p_pid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(pg, &prg->allpgrp, pg_list) {
		if (pg->pg_id == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(sess, &prg->allsess, s_list) {
		if (sess->s_sid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}

	/*
	 * Assign the pid and insert the process.
	 */
	p->p_pid = base;
	LIST_INSERT_HEAD(&prg->allproc, p, p_list);
	lwkt_reltoken(&prg->proc_token);
}

/*
 * Called from exit1 to place the process into a zombie state.
 * The process is removed from the pid hash and p_stat is set
 * to SZOMB.  Normal pfind[n]() calls will not find it any more.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];
	PSTALL(p, "reap1", 0);
	lwkt_gettoken(&prg->proc_token);

	PSTALL(p, "reap1a", 0);
	p->p_stat = SZOMB;

	lwkt_reltoken(&prg->proc_token);
	dsched_exit_proc(p);
}

/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes one before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 *
 * Assumes caller has one ref.
 */
void
proc_remove_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];

	PSTALL(p, "reap2", 1);
	lwkt_gettoken(&prg->proc_token);
	PSTALL(p, "reap2a", 1);
	LIST_REMOVE(p, p_list);		/* remove from the master list */
	LIST_REMOVE(p, p_sibling);	/* and from the sibling list */
	p->p_pptr = NULL;
	p->p_ppid = 0;
	if (pid_doms[p->p_pid % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[p->p_pid % PIDSEL_DOMAINS] = (uint8_t)time_second;
	lwkt_reltoken(&prg->proc_token);
}

/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;

	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
		allocvnode_gc();
	}
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0, NULL);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}
}

/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 *	nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
			allocvnode_gc();
		}
	}
}

void
proc_usermap(struct proc *p, int invfork)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	upmap = kmalloc(roundup2(sizeof(*upmap), PAGE_SIZE), M_PROC,
			M_WAITOK | M_ZERO);
	if (p->p_upmap == NULL) {
		upmap->header[0].type = UKPTYPE_VERSION;
		upmap->header[0].offset = offsetof(struct sys_upmap, version);
		upmap->header[1].type = UPTYPE_RUNTICKS;
		upmap->header[1].offset = offsetof(struct sys_upmap, runticks);
		upmap->header[2].type = UPTYPE_FORKID;
		upmap->header[2].offset = offsetof(struct sys_upmap, forkid);
		upmap->header[3].type = UPTYPE_PID;
		upmap->header[3].offset = offsetof(struct sys_upmap, pid);
		upmap->header[4].type = UPTYPE_PROC_TITLE;
		upmap->header[4].offset = offsetof(struct sys_upmap, proc_title);
		upmap->header[5].type = UPTYPE_INVFORK;
		upmap->header[5].offset = offsetof(struct sys_upmap, invfork);

		upmap->version = UPMAP_VERSION;
		upmap->pid = p->p_pid;
		upmap->forkid = p->p_forkid;
		upmap->invfork = invfork;
		p->p_upmap = upmap;
	} else {
		kfree(upmap, M_PROC);
	}
	lwkt_reltoken(&p->p_token);
}

void
proc_userunmap(struct proc *p)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	if ((upmap = p->p_upmap) != NULL) {
		p->p_upmap = NULL;
		kfree(upmap, M_PROC);
	}
	lwkt_reltoken(&p->p_token);
}

/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 * Zombie procs are skipped.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
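 *
 * Callback contract illustration (hypothetical callback, shown for
 * documentation only):
 *
 *	static int
 *	count_procs_cb(struct proc *p, void *data)
 *	{
 *		++*(int *)data;
 *		return 0;	-- return -1 to terminate the scan early
 *	}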
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data, int segmented)
{
	int limit = nprocs + ncpus;
	struct proc *p;
	int ns;
	int ne;
	int r;
	int n;

	if (segmented) {
		int id = mycpu->gd_cpuid;

		ns = id * ALLPROC_HSIZE / ncpus;
		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
	} else {
		ns = 0;
		ne = ALLPROC_HSIZE;
	}

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = ns; n < ne; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
			if (--limit < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}

/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data, int segmented)
{
	struct proc *p;
	struct lwp *lp;
	int ns;
	int ne;
	int r = 0;
	int n;

	if (segmented) {
		int id = mycpu->gd_cpuid;

		ns = id * ALLPROC_HSIZE / ncpus;
		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
	} else {
		ns = 0;
		ne = ALLPROC_HSIZE;
	}

	for (n = ns; n < ne; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			FOREACH_LWP_IN_PROC(lp, p) {
				LWPHOLD(lp);
				r = callback(lp, data);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Asked to exit early
		 */
		if (p)
			break;
	}
}

/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_token held.
 *
 * No requirements.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;
	int n;

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat != SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	struct pgrp *pgrp;
	struct proc *p;
	procglob_t *prg;
	int i;

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		prg = &procglob[i];

		if (LIST_EMPTY(&prg->allpgrp))
			continue;
		kprintf("\tindx %d\n", i);
		LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
			kprintf("\tpgrp %p, pgid %ld, sess %p, "
				"sesscnt %d, mem %p\n",
				(void *)pgrp, (long)pgrp->pg_id,
				(void *)pgrp->pg_session,
				pgrp->pg_session->s_count,
				(void *)LIST_FIRST(&pgrp->pg_members));
			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
				kprintf("\t\tpid %ld addr %p pgrp %p\n",
					(long)p->p_pid, (void *)p,
					(void *)p->p_pgrp);
			}
		}
	}
}
#endif /* DDB */

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	struct lwp *lp;
	int skp = 0, had_output = 0;
	int error;

	bzero(&ki, sizeof(ki));
	lwkt_gettoken_shared(&p->p_token);
	fill_kinfo_proc(p, &ki);
	if ((flags & KERN_PROC_FLAG_LWP) == 0)
		skp = 1;
	error = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		fill_kinfo_lwp(lp, &ki.kp_lwp);
		had_output = 1;
		if (skp == 0) {
			error = SYSCTL_OUT(req, &ki, sizeof(ki));
			bzero(&ki.kp_lwp, sizeof(ki.kp_lwp));
		}
		LWPRELE(lp);
		if (error)
			break;
	}
	lwkt_reltoken(&p->p_token);

	/*
	 * If aggregating threads, set the tid field to -1.
	 */
	if (skp)
		ki.kp_lwp.kl_tid = -1;

	/*
	 * We need to output at least the proc, even if there is no lwp.
	 * If skp is non-zero we aggregated the lwps and need to output
	 * the result.
	 */
	if (had_output == 0 || skp) {
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
	}
	return (error);
}

/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
{
	struct kinfo_proc ki;
	int error;

	fill_kinfo_proc_kthread(td, &ki);
	error = SYSCTL_OUT(req, &ki, sizeof(ki));
	if (error)
		return error;
	return(0);
}

/*
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int oid = oidp->oid_number;
	u_int namelen = arg2;
	struct proc *p;
	struct thread *td;
	struct thread *marker;
	int flags = 0;
	int error = 0;
	int n;
	int origcpu;
	struct ucred *cr1 = curproc->p_ucred;
	struct ucred *crcache = NULL;

	flags = oid & KERN_PROC_FLAGMASK;
	oid &= ~KERN_PROC_FLAGMASK;

	if ((oid == KERN_PROC_ALL && namelen != 0) ||
	    (oid != KERN_PROC_ALL && namelen != 1)) {
		return (EINVAL);
	}

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	if (oid == KERN_PROC_PID) {
		p = pfind((pid_t)name[0]);
		if (p) {
			crcache = pcredcache(crcache, p);
			if (PRISON_CHECK(cr1, crcache))
				error = sysctl_out_proc(p, req, flags);
			PRELE(p);
		}
		goto post_threads;
	}
	p = NULL;

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			goto post_threads;
	}

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_EMPTY(&prg->allproc))
			continue;
		lwkt_gettoken_shared(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			/*
			 * Show a user only their processes.
			 */
			if (ps_showallprocs == 0) {
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    p_trespass(cr1, crcache)) {
					continue;
				}
			}

			/*
			 * Skip embryonic processes.
			 */
			if (p->p_stat == SIDL)
				continue;

			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid) {
			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0])
					continue;
				break;

			case KERN_PROC_TTY:
				if ((p->p_flags & P_CONTROLT) == 0 ||
				    p->p_session == NULL ||
				    p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
				    (udev_t)name[0])
					continue;
				break;

			case KERN_PROC_UID:
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    crcache->cr_uid != (uid_t)name[0]) {
					continue;
				}
				break;

			case KERN_PROC_RUID:
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    crcache->cr_ruid != (uid_t)name[0]) {
					continue;
				}
				break;
			}

			crcache = pcredcache(crcache, p);
			if (!PRISON_CHECK(cr1, crcache))
				continue;
			PHOLD(p);
			error = sysctl_out_proc(p, req, flags);
			PRELE(p);
			if (error) {
				lwkt_reltoken(&prg->proc_token);
				goto post_threads;
			}
		}
		lwkt_reltoken(&prg->proc_token);
	}

	/*
	 * Iterate over all active cpus and scan their thread list.  Start
	 * with the next logical cpu and end with our original cpu.  We
	 * migrate our own thread to each target cpu in order to safely scan
	 * its thread list.  In the last loop we migrate back to our original
	 * cpu.
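	 *
	 * Descriptive note (summarizing the loop below): a dummy thread
	 * flagged TDF_MARKER is kept on each cpu's gd_tdallq while that
	 * cpu's thread list is walked, so the scan position survives any
	 * blocking in sysctl_out_proc_kthread().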
	 */
	origcpu = mycpu->gd_cpuid;
	if (!ps_showallthreads || jailed(cr1))
		goto post_threads;

	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
	marker->td_flags = TDF_MARKER;
	error = 0;

	for (n = 1; n <= ncpus; ++n) {
		globaldata_t rgd;
		int nid;

		nid = (origcpu + n) % ncpus;
		if (CPUMASK_TESTBIT(smp_active_mask, nid) == 0)
			continue;
		rgd = globaldata_find(nid);
		lwkt_setcpu_self(rgd);

		crit_enter();
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
			TAILQ_INSERT_BEFORE(td, marker, td_allq);
			if (td->td_flags & TDF_MARKER)
				continue;
			if (td->td_proc)
				continue;

			lwkt_hold(td);
			crit_exit();

			switch (oid) {
			case KERN_PROC_PGRP:
			case KERN_PROC_TTY:
			case KERN_PROC_UID:
			case KERN_PROC_RUID:
				break;
			default:
				error = sysctl_out_proc_kthread(td, req);
				break;
			}
			lwkt_rele(td);
			crit_enter();
			if (error)
				break;
		}
		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
		crit_exit();

		if (error)
			break;
	}

	/*
	 * Userland scheduler expects us to return on the same cpu we
	 * started on.
	 */
	if (mycpu->gd_cpuid != origcpu)
		lwkt_setcpu_self(globaldata_find(origcpu));

	kfree(marker, M_TEMP);

post_threads:
	if (crcache)
		crfree(crcache);
	return (error);
}

/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own process
 * title to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct pargs *opa;
	struct pargs *pa;
	int error = 0;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->newptr && curproc != p) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr) {
		if (p->p_upmap != NULL && p->p_upmap->proc_title[0]) {
			/*
			 * Args set via writable user process mmap.
			 * We must calculate the string length manually
			 * because the user data can change at any time.
			 */
			size_t n;
			char *base;

			base = p->p_upmap->proc_title;
			for (n = 0; n < UPMAP_MAXPROCTITLE - 1; ++n) {
				if (base[n] == 0)
					break;
			}
			error = SYSCTL_OUT(req, base, n);
			if (error == 0)
				error = SYSCTL_OUT(req, "", 1);
		} else if ((pa = p->p_args) != NULL) {
			/*
			 * Args set by setproctitle() sysctl.
			 */
			refcount_acquire(&pa->ar_ref);
			error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
			if (refcount_release(&pa->ar_ref))
				kfree(pa, M_PARGS);
		}
	}
	if (req->newptr == NULL)
		goto done;

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
		goto done;
	}

	pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = req->newlen;
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (error) {
		kfree(pa, M_PARGS);
		goto done;
	}

	/*
	 * Replace p_args with the new pa.  p_args may have previously
	 * been NULL.
	 */
	opa = p->p_args;
	p->p_args = pa;

	if (opa) {
		KKASSERT(opa->ar_ref > 0);
		if (refcount_release(&opa->ar_ref)) {
			kfree(opa, M_PARGS);
			/* opa = NULL; */
		}
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int error = 0;
	char *fullpath, *freepath;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken_shared(&p->p_token);

	/*
	 * If we are not allowed to see other args, we certainly shouldn't
	 * get the cwd either.  Also check the usual trespassing.
	 */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
		struct nchandle nch;

		cache_copy(&p->p_fd->fd_ncdir, &nch);
		error = cache_fullpath(p, &nch, NULL,
				       &fullpath, &freepath, 0);
		cache_drop(&nch);
		if (error)
			goto done;
		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
		kfree(freepath, M_TEMP);
	}

done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	char *retbuf, *freebuf;
	int error = 0;
	struct nchandle nch;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = curproc;
	} else {
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
	}

	cache_copy(&p->p_textnch, &nch);
	error = cache_fullpath(p, &nch, NULL, &retbuf, &freebuf, 0);
	cache_drop(&nch);
	if (error)
		goto done;
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	kfree(freebuf, M_TEMP);
done:
	if (*pidp != -1)
		PRELE(p);

	return (error);
}

static int
sysctl_kern_proc_sigtramp(SYSCTL_HANDLER_ARGS)
{
	/*int *name = (int *)arg1;*/
	u_int namelen = arg2;
	struct kinfo_sigtramp kst;
	const struct sysentvec *sv;
	int error;

	if (namelen > 1)
		return (EINVAL);
	/* ignore pid if passed in (freebsd compatibility) */

	sv = curproc->p_sysent;
	bzero(&kst, sizeof(kst));
	if (sv->sv_szsigcode) {
		intptr_t sigbase;

		sigbase = trunc_page64((intptr_t)PS_STRINGS -
				       *sv->sv_szsigcode);
		sigbase -= SZSIGCODE_EXTRA_BYTES;

		kst.ksigtramp_start = (void *)sigbase;
		kst.ksigtramp_end = (void *)(sigbase + *sv->sv_szsigcode);
	}
	error = SYSCTL_OUT(req, &kst, sizeof(kst));

	return (error);
}

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all,
	    CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK,
	    0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
	    CTLFLAG_RD | CTLFLAG_NOLOCK,
	    sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
	    CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK,
	    sysctl_kern_proc_args, "Process argument list");
list"); 1995 1996 SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, 1997 CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK, 1998 sysctl_kern_proc_cwd, "Process argument list"); 1999 2000 static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, 2001 CTLFLAG_RD | CTLFLAG_NOLOCK, 2002 sysctl_kern_proc_pathname, "Process executable path"); 2003 2004 SYSCTL_PROC(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp, 2005 CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK, 2006 0, 0, sysctl_kern_proc_sigtramp, "S,sigtramp", 2007 "Return sigtramp address range"); 2008