/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <sys/random.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

/*
 * Hash table size must be a power of two and is not currently dynamically
 * sized.  There is a trade-off between the linear scans which must iterate
 * all HSIZE elements and the number of elements which might accumulate
 * within each hash chain.
 */
#define ALLPROC_HSIZE	256
#define ALLPROC_HMASK	(ALLPROC_HSIZE - 1)
#define ALLPROC_HASH(pid)	(pid & ALLPROC_HMASK)
#define PGRP_HASH(pid)	(pid & ALLPROC_HMASK)
#define SESS_HASH(pid)	(pid & ALLPROC_HMASK)

/*
 * pid_doms[] management, used to control how quickly a PID can be recycled.
 * Must be a multiple of ALLPROC_HSIZE for the proc_makepid() inner loops.
 *
 * WARNING! PIDDOM_DELAY should not be defined > 20 or so unless you change
 *	    the array from 8-bit to 16-bit entries.
 */
#define PIDDOM_COUNT	10	/* 10 pids per domain - reduce array size */
#define PIDDOM_DELAY	10	/* min 10 seconds after exit before reuse */
#define PIDSEL_DOMAINS	(PID_MAX / PIDDOM_COUNT / ALLPROC_HSIZE * ALLPROC_HSIZE)
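
/*
 * Worked example (illustrative, assuming PID_MAX is 999999 as on
 * DragonFly): PIDSEL_DOMAINS = 999999 / 10 / 256 * 256 = 99840.  The
 * integer divide and re-multiply round the domain count down to a
 * multiple of ALLPROC_HSIZE, matching the "~100,000 entries" sizing
 * note on pid_doms[] below.
 */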

/* Used by libkvm */
int allproc_hsize = ALLPROC_HSIZE;

LIST_HEAD(pidhashhead, proc);

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
	   &ps_showallprocs, 0,
	   "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
	   &ps_showallthreads, 0,
	   "Unprivileged processes can see kernel threads");
static u_int pid_domain_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_domain_skips, CTLFLAG_RW,
	    &pid_domain_skips, 0,
	    "Number of pid_doms[] domains skipped on allocation");
static u_int pid_inner_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_inner_skips, CTLFLAG_RW,
	    &pid_inner_skips, 0,
	    "Number of proc_makepid() inner-loop collisions skipped");

static void orphanpg(struct pgrp *pg);
static void proc_makepid(struct proc *p, int random_offset);

/*
 * Other process lists
 */
static struct lwkt_token proc_tokens[ALLPROC_HSIZE];
static struct proclist allprocs[ALLPROC_HSIZE];	/* locked by proc_tokens */
static struct pgrplist allpgrps[ALLPROC_HSIZE];	/* locked by proc_tokens */
static struct sesslist allsessn[ALLPROC_HSIZE];	/* locked by proc_tokens */

/*
 * We try our best to avoid recycling a PID too quickly.  We do this by
 * storing (uint8_t)time_second in the related pid domain on-reap and then
 * using that to skip-over the domain on-allocate.
 *
 * This array has to be fairly large to support a high fork/exec rate.
 * We want ~100,000 entries or so to support a 10-second reuse latency
 * at 10,000 execs/second, worst case.  Best-case multiply by PIDDOM_COUNT
 * (approximately 100,000 execs/second).
 */
static uint8_t pid_doms[PIDSEL_DOMAINS];	/* ~100,000 entries */

/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

/*
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
        int error, pid;

        pid = randompid;
        error = sysctl_handle_int(oidp, &pid, 0, req);
        if (error || !req->newptr)
                return (error);
        if (pid < 0 || pid > PID_MAX - 100)     /* out of range */
                pid = PID_MAX - 100;
        else if (pid < 2)                       /* NOP */
                pid = 0;
        else if (pid < 100)                     /* Make it reasonable */
                pid = 100;
        randompid = pid;
        return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
	    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

/*
 * Initialize global process hashing structures.
 *
 * These functions are ONLY called from the low level boot code and do
 * not lock their operations.
 */
void
procinit(void)
{
        u_long i;

        /*
         * Avoid unnecessary stalls due to pid_doms[] values all being
         * the same.  Make sure that the allocation of pid 1 and pid 2
         * succeeds.
         */
        for (i = 0; i < PIDSEL_DOMAINS; ++i)
                pid_doms[i] = (int8_t)i - (int8_t)(PIDDOM_DELAY + 1);

        /*
         * Other misc init.
         */
        for (i = 0; i < ALLPROC_HSIZE; ++i) {
                LIST_INIT(&allprocs[i]);
                LIST_INIT(&allsessn[i]);
                LIST_INIT(&allpgrps[i]);
                lwkt_token_init(&proc_tokens[i], "allproc");
        }
        lwkt_init();
        uihashinit();
}

void
procinsertinit(struct proc *p)
{
        LIST_INSERT_HEAD(&allprocs[ALLPROC_HASH(p->p_pid)], p, p_list);
}

void
pgrpinsertinit(struct pgrp *pg)
{
        LIST_INSERT_HEAD(&allpgrps[ALLPROC_HASH(pg->pg_id)], pg, pg_list);
}

void
sessinsertinit(struct session *sess)
{
        LIST_INSERT_HEAD(&allsessn[ALLPROC_HASH(sess->s_sid)], sess, s_list);
}

/*
 * Process hold/release support functions.  Called via the PHOLD(),
 * PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB	0x20000000
#define PLOCK_WAITING	0x40000000
#define PLOCK_MASK	0x1FFFFFFF

void
pstall(struct proc *p, const char *wmesg, int count)
{
        int o;
        int n;

        for (;;) {
                o = p->p_lock;
                cpu_ccfence();
                if ((o & PLOCK_MASK) <= count)
                        break;
                n = o | PLOCK_WAITING;
                tsleep_interlock(&p->p_lock, 0);

                /*
                 * If someone is trying to single-step the process during
                 * an exec or an exit they can deadlock us because procfs
                 * sleeps with the process held.
                 */
                if (p->p_stops) {
                        if (p->p_flags & P_INEXEC) {
                                wakeup(&p->p_stype);
                        } else if (p->p_flags & P_POSTEXIT) {
                                spin_lock(&p->p_spin);
                                p->p_stops = 0;
                                p->p_step = 0;
                                spin_unlock(&p->p_spin);
                                wakeup(&p->p_stype);
                        }
                }

                if (atomic_cmpset_int(&p->p_lock, o, n)) {
                        tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
                }
        }
}

void
phold(struct proc *p)
{
        atomic_add_int(&p->p_lock, 1);
}

/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prele(struct proc *p)
{
        int o;
        int n;

        /*
         * Fast path
         */
        if (atomic_cmpset_int(&p->p_lock, 1, 0))
                return;

        /*
         * Slow path
         */
        for (;;) {
                o = p->p_lock;
                KKASSERT((o & PLOCK_MASK) > 0);
                cpu_ccfence();
                n = (o - 1) & ~PLOCK_WAITING;
                if (atomic_cmpset_int(&p->p_lock, o, n)) {
                        if (o & PLOCK_WAITING)
                                wakeup(&p->p_lock);
                        break;
                }
        }
}
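
#if 0
/*
 * Illustrative usage sketch (hypothetical caller, not compiled):
 * pfind() (defined later in this file) returns the proc already held,
 * so a lookup/inspect/release sequence only needs PRELE().  For a proc
 * already in-hand, PHOLD()/PRELE() bracket any window where the proc
 * must not be ripped out by an exit on another cpu.
 */
static void
example_inspect(pid_t pid)
{
        struct proc *p;

        if ((p = pfind(pid)) != NULL) {
                /* p_lock is bumped; p cannot be ripped out here */
                kprintf("pid %d stat %d\n", (int)p->p_pid, (int)p->p_stat);
                PRELE(p);       /* p may become invalid immediately after */
        }
}
#endif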

/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that we must block
 * without holding a ref, meaning that the caller must ensure that (p)
 * remains valid through some other interlock (typically on its parent
 * process's p_token).
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
        int o;
        int n;

        /*
         * Fast path
         */
        if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
                return(0);

        /*
         * Slow path
         */
        for (;;) {
                o = p->p_lock;
                cpu_ccfence();
                if ((o & PLOCK_ZOMB) == 0) {
                        n = (o + 1) | PLOCK_ZOMB;
                        if (atomic_cmpset_int(&p->p_lock, o, n))
                                return(0);
                } else {
                        KKASSERT((o & PLOCK_MASK) > 0);
                        n = o | PLOCK_WAITING;
                        tsleep_interlock(&p->p_lock, 0);
                        if (atomic_cmpset_int(&p->p_lock, o, n)) {
                                tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
                                /* (p) can be ripped out at this point */
                                return(1);
                        }
                }
        }
}

/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prelezomb(struct proc *p)
{
        int o;
        int n;

        /*
         * Fast path
         */
        if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
                return;

        /*
         * Slow path
         */
        KKASSERT(p->p_lock & PLOCK_ZOMB);
        for (;;) {
                o = p->p_lock;
                KKASSERT((o & PLOCK_MASK) > 0);
                cpu_ccfence();
                n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
                if (atomic_cmpset_int(&p->p_lock, o, n)) {
                        if (o & PLOCK_WAITING)
                                wakeup(&p->p_lock);
                        break;
                }
        }
}
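
#if 0
/*
 * Illustrative sketch (not compiled): the wait*()/reap path is the
 * canonical pholdzomb() consumer.  The caller is assumed to hold an
 * interlock that keeps (p) valid across a failed attempt, typically
 * the parent's p_token, because pholdzomb() blocks without a ref.
 */
if (pholdzomb(p) == 0) {
        /* serialized against other pholdzomb() callers */
        /* ... reap zombie state ... */
        prelezomb(p);
} else {
        /* blocked and lost the race; revalidate p and retry */
}
#endif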

/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 */
int
inferior(struct proc *p)
{
        struct proc *p2;

        PHOLD(p);
        lwkt_gettoken_shared(&p->p_token);
        while (p != curproc) {
                if (p->p_pid == 0) {
                        lwkt_reltoken(&p->p_token);
                        PRELE(p);	/* drop the hold taken above */
                        return (0);
                }
                p2 = p->p_pptr;
                PHOLD(p2);
                lwkt_reltoken(&p->p_token);
                PRELE(p);
                lwkt_gettoken_shared(&p2->p_token);
                p = p2;
        }
        lwkt_reltoken(&p->p_token);
        PRELE(p);

        return (1);
}

/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
        struct proc *p = curproc;
        int n;

        /*
         * Shortcut the current process
         */
        if (p && p->p_pid == pid) {
                PHOLD(p);
                return (p);
        }

        /*
         * Otherwise find it in the hash table.
         */
        n = ALLPROC_HASH(pid);

        lwkt_gettoken_shared(&proc_tokens[n]);
        LIST_FOREACH(p, &allprocs[n], p_list) {
                if (p->p_stat == SZOMB)
                        continue;
                if (p->p_pid == pid) {
                        PHOLD(p);
                        lwkt_reltoken(&proc_tokens[n]);
                        return (p);
                }
        }
        lwkt_reltoken(&proc_tokens[n]);

        return (NULL);
}

/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The result will not be stable and is typically only used to validate
 * against a process that the caller has in-hand.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
        struct proc *p = curproc;
        int n;

        /*
         * Shortcut the current process
         */
        if (p && p->p_pid == pid)
                return (p);

        /*
         * Otherwise find it in the hash table.
         */
        n = ALLPROC_HASH(pid);

        lwkt_gettoken_shared(&proc_tokens[n]);
        LIST_FOREACH(p, &allprocs[n], p_list) {
                if (p->p_stat == SZOMB)
                        continue;
                if (p->p_pid == pid) {
                        lwkt_reltoken(&proc_tokens[n]);
                        return (p);
                }
        }
        lwkt_reltoken(&proc_tokens[n]);

        return (NULL);
}

/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
        struct proc *p = curproc;
        int n;

        /*
         * Shortcut the current process
         */
        if (p && p->p_pid == pid) {
                PHOLD(p);
                return (p);
        }

        /*
         * Otherwise find it in the hash table.
         */
        n = ALLPROC_HASH(pid);

        lwkt_gettoken_shared(&proc_tokens[n]);
        LIST_FOREACH(p, &allprocs[n], p_list) {
                if (p->p_stat != SZOMB)
                        continue;
                if (p->p_pid == pid) {
                        PHOLD(p);
                        lwkt_reltoken(&proc_tokens[n]);
                        return (p);
                }
        }
        lwkt_reltoken(&proc_tokens[n]);

        return (NULL);
}

void
pgref(struct pgrp *pgrp)
{
        refcount_acquire(&pgrp->pg_refs);
}

void
pgrel(struct pgrp *pgrp)
{
        int count;
        int n;

        n = PGRP_HASH(pgrp->pg_id);
        for (;;) {
                count = pgrp->pg_refs;
                cpu_ccfence();
                KKASSERT(count > 0);
                if (count == 1) {
                        lwkt_gettoken(&proc_tokens[n]);
                        if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
                                break;
                        lwkt_reltoken(&proc_tokens[n]);
                        /* retry */
                } else {
                        if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
                                return;
                        /* retry */
                }
        }

        /*
         * Successful 1->0 transition, proc_tokens[n] is held.
         */
        LIST_REMOVE(pgrp, pg_list);
        pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] = (uint8_t)time_second;

        /*
         * Reset any sigio structures pointing to us as a result of
         * F_SETOWN with our pgid.
         */
        funsetownlst(&pgrp->pg_sigiolst);

        if (pgrp->pg_session->s_ttyp != NULL &&
            pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
                pgrp->pg_session->s_ttyp->t_pgrp = NULL;
        }
        lwkt_reltoken(&proc_tokens[n]);

        sess_rele(pgrp->pg_session);
        kfree(pgrp, M_PGRP);
}
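
#if 0
/*
 * Illustrative usage sketch (hypothetical, not compiled): pgfind()
 * (below) returns a pgrp with a reference already added; pgrel() is
 * the matching release.  The final 1->0 transition in pgrel()
 * re-acquires the hash token so the pgrp can be unhashed atomically
 * against concurrent lookups.
 */
struct pgrp *pg;

if ((pg = pgfind(pgid)) != NULL) {
        /* ... inspect pg (e.g. pg->pg_members under pg->pg_token) ... */
        pgrel(pg);
}
#endif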

/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
        struct pgrp *pgrp;
        int n;

        n = PGRP_HASH(pgid);
        lwkt_gettoken_shared(&proc_tokens[n]);

        LIST_FOREACH(pgrp, &allpgrps[n], pg_list) {
                if (pgrp->pg_id == pgid) {
                        refcount_acquire(&pgrp->pg_refs);
                        lwkt_reltoken(&proc_tokens[n]);
                        return (pgrp);
                }
        }
        lwkt_reltoken(&proc_tokens[n]);
        return (NULL);
}

/*
 * Move p to a new or existing process group (and session).
 *
 * No requirements.
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
        struct pgrp *pgrp;
        struct pgrp *opgrp;
        int error;

        pgrp = pgfind(pgid);

        KASSERT(pgrp == NULL || !mksess,
                ("enterpgrp: setsid into non-empty pgrp"));
        KASSERT(!SESS_LEADER(p),
                ("enterpgrp: session leader attempted setpgrp"));

        if (pgrp == NULL) {
                pid_t savepid = p->p_pid;
                struct proc *np;
                int n;

                /*
                 * new process group
                 */
                KASSERT(p->p_pid == pgid,
                        ("enterpgrp: new pgrp and pid != pgid"));
                pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
                pgrp->pg_id = pgid;
                LIST_INIT(&pgrp->pg_members);
                pgrp->pg_jobc = 0;
                SLIST_INIT(&pgrp->pg_sigiolst);
                lwkt_token_init(&pgrp->pg_token, "pgrp_token");
                refcount_init(&pgrp->pg_refs, 1);
                lockinit(&pgrp->pg_lock, "pgwt", 0, 0);

                /*
                 * Acquire the hash token before revalidating p, so the
                 * token we release in the failure path is actually held.
                 */
                n = PGRP_HASH(pgid);
                lwkt_gettoken(&proc_tokens[n]);

                if ((np = pfindn(savepid)) == NULL || np != p) {
                        lwkt_reltoken(&proc_tokens[n]);
                        error = ESRCH;
                        kfree(pgrp, M_PGRP);
                        goto fatal;
                }

                if (mksess) {
                        struct session *sess;

                        /*
                         * new session
                         */
                        sess = kmalloc(sizeof(struct session), M_SESSION,
                                       M_WAITOK | M_ZERO);
                        lwkt_gettoken(&p->p_token);
                        sess->s_leader = p;
                        sess->s_sid = p->p_pid;
                        sess->s_count = 1;
                        sess->s_ttyvp = NULL;
                        sess->s_ttyp = NULL;
                        bcopy(p->p_session->s_login, sess->s_login,
                              sizeof(sess->s_login));
                        pgrp->pg_session = sess;
                        KASSERT(p == curproc,
                                ("enterpgrp: mksession and p != curproc"));
                        p->p_flags &= ~P_CONTROLT;
                        LIST_INSERT_HEAD(&allsessn[n], sess, s_list);
                        lwkt_reltoken(&p->p_token);
                } else {
                        lwkt_gettoken(&p->p_token);
                        pgrp->pg_session = p->p_session;
                        sess_hold(pgrp->pg_session);
                        lwkt_reltoken(&p->p_token);
                }
                LIST_INSERT_HEAD(&allpgrps[n], pgrp, pg_list);

                lwkt_reltoken(&proc_tokens[n]);
        } else if (pgrp == p->p_pgrp) {
                pgrel(pgrp);
                goto done;
        } /* else pgfind() referenced the pgrp */

        lwkt_gettoken(&pgrp->pg_token);
        lwkt_gettoken(&p->p_token);

        /*
         * Replace p->p_pgrp, handling any races that occur.
         */
        while ((opgrp = p->p_pgrp) != NULL) {
                pgref(opgrp);
                lwkt_gettoken(&opgrp->pg_token);
                if (opgrp != p->p_pgrp) {
                        lwkt_reltoken(&opgrp->pg_token);
                        pgrel(opgrp);
                        continue;
                }
                LIST_REMOVE(p, p_pglist);
                break;
        }
        p->p_pgrp = pgrp;
        LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

        /*
         * Adjust eligibility of affected pgrps to participate in job control.
         * Increment eligibility counts before decrementing, otherwise we
         * could reach 0 spuriously during the first call.
         */
        fixjobc(p, pgrp, 1);
        if (opgrp) {
                fixjobc(p, opgrp, 0);
                lwkt_reltoken(&opgrp->pg_token);
                pgrel(opgrp);   /* manual pgref */
                pgrel(opgrp);   /* p->p_pgrp ref */
        }
        lwkt_reltoken(&p->p_token);
        lwkt_reltoken(&pgrp->pg_token);
done:
        error = 0;
fatal:
        return (error);
}
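
#if 0
/*
 * Illustrative sketch of the two canonical enterpgrp() call styles
 * (the real callers live in the setsid()/setpgid() syscall paths):
 */
error = enterpgrp(p, p->p_pid, 1);      /* setsid(): new pgrp + session */
error = enterpgrp(p, pgid, 0);          /* setpgid(): join/create a pgrp */
#endif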

/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
        struct pgrp *pg;

        lwkt_gettoken(&p->p_token);
        while ((pg = p->p_pgrp) != NULL) {
                pgref(pg);
                lwkt_gettoken(&pg->pg_token);
                if (p->p_pgrp != pg) {
                        lwkt_reltoken(&pg->pg_token);
                        pgrel(pg);
                        continue;
                }
                p->p_pgrp = NULL;
                LIST_REMOVE(p, p_pglist);
                lwkt_reltoken(&pg->pg_token);
                pgrel(pg);      /* manual pgref */
                pgrel(pg);      /* p->p_pgrp ref */
                break;
        }
        lwkt_reltoken(&p->p_token);

        return (0);
}

/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
        atomic_add_int(&sp->s_count, 1);
}

/*
 * No requirements.
 */
void
sess_rele(struct session *sess)
{
        struct tty *tp;
        int count;
        int n;

        n = SESS_HASH(sess->s_sid);
        for (;;) {
                count = sess->s_count;
                cpu_ccfence();
                KKASSERT(count > 0);
                if (count == 1) {
                        lwkt_gettoken(&tty_token);
                        lwkt_gettoken(&proc_tokens[n]);
                        if (atomic_cmpset_int(&sess->s_count, 1, 0))
                                break;
                        lwkt_reltoken(&proc_tokens[n]);
                        lwkt_reltoken(&tty_token);
                        /* retry */
                } else {
                        if (atomic_cmpset_int(&sess->s_count, count, count - 1))
                                return;
                        /* retry */
                }
        }

        /*
         * Successful 1->0 transition; tty_token and proc_tokens[n] are held.
         */
        LIST_REMOVE(sess, s_list);
        pid_doms[sess->s_sid % PIDSEL_DOMAINS] = (uint8_t)time_second;

        if (sess->s_ttyp && sess->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
                /* FULL CLOSE, see ttyclearsession() */
                KKASSERT(sess->s_ttyp->t_session == sess);
                sess->s_ttyp->t_session = NULL;
#else
                /* HALF CLOSE, see ttyclearsession() */
                if (sess->s_ttyp->t_session == sess)
                        sess->s_ttyp->t_session = NULL;
#endif
        }
        if ((tp = sess->s_ttyp) != NULL) {
                sess->s_ttyp = NULL;
                ttyunhold(tp);
        }
        lwkt_reltoken(&proc_tokens[n]);
        lwkt_reltoken(&tty_token);

        kfree(sess, M_SESSION);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
        struct pgrp *hispgrp;
        struct session *mysession;
        struct proc *np;

        /*
         * Check p's parent to see whether p qualifies its own process
         * group; if so, adjust count for p's process group.
         */
        lwkt_gettoken(&p->p_token);     /* p_children scan */
        lwkt_gettoken(&pgrp->pg_token);

        mysession = pgrp->pg_session;
        if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
            hispgrp->pg_session == mysession) {
                if (entering)
                        pgrp->pg_jobc++;
                else if (--pgrp->pg_jobc == 0)
                        orphanpg(pgrp);
        }

        /*
         * Check this process' children to see whether they qualify
         * their process groups; if so, adjust counts for children's
         * process groups.
         */
        LIST_FOREACH(np, &p->p_children, p_sibling) {
                PHOLD(np);
                lwkt_gettoken(&np->p_token);
                if ((hispgrp = np->p_pgrp) != pgrp &&
                    hispgrp->pg_session == mysession &&
                    np->p_stat != SZOMB) {
                        pgref(hispgrp);
                        lwkt_gettoken(&hispgrp->pg_token);
                        if (entering)
                                hispgrp->pg_jobc++;
                        else if (--hispgrp->pg_jobc == 0)
                                orphanpg(hispgrp);
                        lwkt_reltoken(&hispgrp->pg_token);
                        pgrel(hispgrp);
                }
                lwkt_reltoken(&np->p_token);
                PRELE(np);
        }
        KKASSERT(pgrp->pg_refs > 0);
        lwkt_reltoken(&pgrp->pg_token);
        lwkt_reltoken(&p->p_token);
}

/*
 * A process group has become orphaned; if there are any stopped processes
 * in the group, hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
        struct proc *p;

        LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                if (p->p_stat == SSTOP) {
                        LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                                ksignal(p, SIGHUP);
                                ksignal(p, SIGCONT);
                        }
                        return;
                }
        }
}

/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
        int random_offset;

        if ((random_offset = randompid) != 0) {
                read_random(&random_offset, sizeof(random_offset));
                random_offset = (random_offset & 0x7FFFFFFF) % randompid;
        }
        proc_makepid(p, random_offset);
}

/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * p_pid is assigned and the process is added to the allproc hash table.
 *
 * WARNING! We need to allocate PIDs sequentially during early boot.
 *	    In particular, init needs to have a pid of 1.
 */
static
void
proc_makepid(struct proc *p, int random_offset)
{
        static pid_t nextpid = 1;       /* heuristic, allowed to race */
        struct pgrp *pg;
        struct proc *ps;
        struct session *sess;
        pid_t base;
        int8_t delta8;
        int retries;
        int n;

        /*
         * Select the next pid base candidate.
         *
         * Check for wraparound, do not allow a pid < 100.
         */
        retries = 0;
retry:
        base = atomic_fetchadd_int(&nextpid, 1) + random_offset;
        if (base <= 0 || base >= PID_MAX) {
                base = base % PID_MAX;
                if (base < 0)
                        base = 100;
                if (base < 100)
                        base += 100;
                nextpid = base;         /* reset (SMP race ok) */
        }

        /*
         * Do not allow a base pid to be selected from a domain that has
         * recently seen a pid/pgid/sessid reap.  Sleep a little if we looped
         * through all available domains.
         *
         * WARNING: We want the early pids to be allocated linearly,
         *	    particularly pid 1 and pid 2.
         */
        if (++retries >= PIDSEL_DOMAINS)
                tsleep(&nextpid, 0, "makepid", 1);
        if (base >= 100) {
                delta8 = (int8_t)time_second -
                         (int8_t)pid_doms[base % PIDSEL_DOMAINS];
                if (delta8 >= 0 && delta8 <= PIDDOM_DELAY) {
                        ++pid_domain_skips;
                        goto retry;
                }
        }

        /*
         * Calculate a hash index and find an unused process id within
         * the table, looping if we cannot find one.
         *
         * The inner loop increments by ALLPROC_HSIZE, which keeps the
         * PID in the same hash chain so the same token covers the retry.
         */
        n = ALLPROC_HASH(base);
        lwkt_gettoken(&proc_tokens[n]);

restart1:
        LIST_FOREACH(ps, &allprocs[n], p_list) {
                if (ps->p_pid == base) {
                        base += ALLPROC_HSIZE;
                        if (base >= PID_MAX) {
                                lwkt_reltoken(&proc_tokens[n]);
                                goto retry;
                        }
                        ++pid_inner_skips;
                        goto restart1;
                }
        }
        LIST_FOREACH(pg, &allpgrps[n], pg_list) {
                if (pg->pg_id == base) {
                        base += ALLPROC_HSIZE;
                        if (base >= PID_MAX) {
                                lwkt_reltoken(&proc_tokens[n]);
                                goto retry;
                        }
                        ++pid_inner_skips;
                        goto restart1;
                }
        }
        LIST_FOREACH(sess, &allsessn[n], s_list) {
                if (sess->s_sid == base) {
                        base += ALLPROC_HSIZE;
                        if (base >= PID_MAX) {
                                lwkt_reltoken(&proc_tokens[n]);
                                goto retry;
                        }
                        ++pid_inner_skips;
                        goto restart1;
                }
        }

        /*
         * Assign the pid and insert the process.
         */
        p->p_pid = base;
        LIST_INSERT_HEAD(&allprocs[n], p, p_list);
        lwkt_reltoken(&proc_tokens[n]);
}
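
/*
 * Worked example (illustrative): with ALLPROC_HSIZE = 256, a candidate
 * base of 1000 hashes to 1000 & 255 = 232.  On a collision the inner
 * loop retries with base + 256 = 1256, and 1256 & 255 is still 232,
 * so the retry stays in the same hash chain under the same token.
 */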

/*
 * Called from exit1 to place the process into a zombie state.
 * The process is removed from the pid hash and p_stat is set
 * to SZOMB.  Normal pfind[n]() calls will not find it any more.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
        int n;

        n = ALLPROC_HASH(p->p_pid);
        PSTALL(p, "reap1", 0);
        lwkt_gettoken(&proc_tokens[n]);

        PSTALL(p, "reap1a", 0);
        p->p_stat = SZOMB;

        lwkt_reltoken(&proc_tokens[n]);
        dsched_exit_proc(p);
}

/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_remove_zombie(struct proc *p)
{
        int n;

        n = ALLPROC_HASH(p->p_pid);

        PSTALL(p, "reap2", 0);
        lwkt_gettoken(&proc_tokens[n]);
        PSTALL(p, "reap2a", 0);
        LIST_REMOVE(p, p_list);         /* remove from master list */
        LIST_REMOVE(p, p_sibling);      /* and from sibling list */
        p->p_pptr = NULL;
        pid_doms[p->p_pid % PIDSEL_DOMAINS] = (uint8_t)time_second;
        lwkt_reltoken(&proc_tokens[n]);
}

/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
        struct proc *p = lp->lwp_proc;

        if (lp->lwp_mpflags & LWP_MP_VNLRU) {
                atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
                allocvnode_gc();
        }
        if (lp->lwp_mpflags & LWP_MP_WEXIT) {
                lwkt_gettoken(&p->p_token);
                lwp_exit(0, NULL);
                lwkt_reltoken(&p->p_token);     /* NOT REACHED */
        }
}

/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 *	nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
        struct lwp *lp = curthread->td_lwp;

        if (lp) {
                if (lp->lwp_mpflags & LWP_MP_VNLRU) {
                        atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
                        allocvnode_gc();
                }
        }
}

/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 * Zombie procs are skipped.
 *
 * The callback is made with the process held and the per-bucket
 * proc_tokens[n] held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data)
{
        int limit = nprocs + ncpus;
        struct proc *p;
        int r;
        int n;

        /*
         * proc_tokens[n] protects the allproc list and PHOLD() prevents the
         * process from being removed from the allproc list or the zombproc
         * list.
         */
        for (n = 0; n < ALLPROC_HSIZE; ++n) {
                if (LIST_FIRST(&allprocs[n]) == NULL)
                        continue;
                lwkt_gettoken(&proc_tokens[n]);
                LIST_FOREACH(p, &allprocs[n], p_list) {
                        if (p->p_stat == SZOMB)
                                continue;
                        PHOLD(p);
                        r = callback(p, data);
                        PRELE(p);
                        if (r < 0)
                                break;
                        if (--limit < 0)
                                break;
                }
                lwkt_reltoken(&proc_tokens[n]);

                /*
                 * Check if asked to stop early
                 */
                if (p)
                        break;
        }
}

/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and the
 * per-bucket proc_tokens[n] held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
{
        struct proc *p;
        struct lwp *lp;
        int r = 0;
        int n;

        for (n = 0; n < ALLPROC_HSIZE; ++n) {
                if (LIST_FIRST(&allprocs[n]) == NULL)
                        continue;
                lwkt_gettoken(&proc_tokens[n]);
                LIST_FOREACH(p, &allprocs[n], p_list) {
                        if (p->p_stat == SZOMB)
                                continue;
                        PHOLD(p);
                        lwkt_gettoken(&p->p_token);
                        FOREACH_LWP_IN_PROC(lp, p) {
                                LWPHOLD(lp);
                                r = callback(lp, data);
                                LWPRELE(lp);
                        }
                        lwkt_reltoken(&p->p_token);
                        PRELE(p);
                        if (r < 0)
                                break;
                }
                lwkt_reltoken(&proc_tokens[n]);

                /*
                 * Asked to exit early
                 */
                if (p)
                        break;
        }
}
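
#if 0
/*
 * Illustrative callback sketch (hypothetical, not compiled): count
 * non-zombie processes via allproc_scan().  The callback runs with the
 * process held and the bucket token held; returning -1 would stop the
 * scan early.
 */
static int
count_procs_cb(struct proc *p, void *data)
{
        ++*(int *)data;
        return (0);
}

static int
count_procs(void)
{
        int count = 0;

        allproc_scan(count_procs_cb, &count);
        return (count);
}
#endif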

/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and the per-bucket
 * proc_tokens[n] held.
 *
 * No requirements.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
        struct proc *p;
        int r;
        int n;

        /*
         * proc_tokens[n] protects the allproc list and PHOLD() prevents the
         * process from being removed from the allproc list or the zombproc
         * list.
         */
        for (n = 0; n < ALLPROC_HSIZE; ++n) {
                if (LIST_FIRST(&allprocs[n]) == NULL)
                        continue;
                lwkt_gettoken(&proc_tokens[n]);
                LIST_FOREACH(p, &allprocs[n], p_list) {
                        if (p->p_stat != SZOMB)
                                continue;
                        PHOLD(p);
                        r = callback(p, data);
                        PRELE(p);
                        if (r < 0)
                                break;
                }
                lwkt_reltoken(&proc_tokens[n]);

                /*
                 * Check if asked to stop early
                 */
                if (p)
                        break;
        }
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
        struct pgrp *pgrp;
        struct proc *p;
        int i;

        for (i = 0; i < ALLPROC_HSIZE; ++i) {
                if (LIST_EMPTY(&allpgrps[i]))
                        continue;
                kprintf("\tindx %d\n", i);
                LIST_FOREACH(pgrp, &allpgrps[i], pg_list) {
                        kprintf("\tpgrp %p, pgid %ld, sess %p, "
                                "sesscnt %d, mem %p\n",
                                (void *)pgrp, (long)pgrp->pg_id,
                                (void *)pgrp->pg_session,
                                pgrp->pg_session->s_count,
                                (void *)LIST_FIRST(&pgrp->pg_members));
                        LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
                                kprintf("\t\tpid %ld addr %p pgrp %p\n",
                                        (long)p->p_pid, (void *)p,
                                        (void *)p->p_pgrp);
                        }
                }
        }
}
#endif /* DDB */

/*
 * The caller must have the process held; p->p_token is acquired shared
 * here for the lwp loop.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
        struct kinfo_proc ki;
        struct lwp *lp;
        int skp = 0, had_output = 0;
        int error;

        bzero(&ki, sizeof(ki));
        lwkt_gettoken_shared(&p->p_token);
        fill_kinfo_proc(p, &ki);
        if ((flags & KERN_PROC_FLAG_LWP) == 0)
                skp = 1;
        error = 0;
        FOREACH_LWP_IN_PROC(lp, p) {
                LWPHOLD(lp);
                fill_kinfo_lwp(lp, &ki.kp_lwp);
                had_output = 1;
                error = SYSCTL_OUT(req, &ki, sizeof(ki));
                LWPRELE(lp);
                if (error)
                        break;
                if (skp)
                        break;
        }
        lwkt_reltoken(&p->p_token);
        /* We need to output at least the proc, even if there is no lwp. */
        if (had_output == 0) {
                error = SYSCTL_OUT(req, &ki, sizeof(ki));
        }
        return (error);
}

/*
 * The caller must hold the thread (e.g. via lwkt_hold()) so it remains
 * valid across the copyout.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
{
        struct kinfo_proc ki;
        int error;

        fill_kinfo_proc_kthread(td, &ki);
        error = SYSCTL_OUT(req, &ki, sizeof(ki));
        if (error)
                return error;
        return(0);
}

/*
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
        int *name = (int *)arg1;
        int oid = oidp->oid_number;
        u_int namelen = arg2;
        struct proc *p;
        struct thread *td;
        struct thread *marker;
        int flags = 0;
        int error = 0;
        int n;
        int origcpu;
        struct ucred *cr1 = curproc->p_ucred;

        flags = oid & KERN_PROC_FLAGMASK;
        oid &= ~KERN_PROC_FLAGMASK;

        if ((oid == KERN_PROC_ALL && namelen != 0) ||
            (oid != KERN_PROC_ALL && namelen != 1)) {
                return (EINVAL);
        }

        /*
         * proc_tokens[n] protects the allproc list and PHOLD() prevents the
         * process from being removed from the allproc list or the zombproc
         * list.
         */
        if (oid == KERN_PROC_PID) {
                p = pfind((pid_t)name[0]);
                if (p) {
                        if (PRISON_CHECK(cr1, p->p_ucred))
                                error = sysctl_out_proc(p, req, flags);
                        PRELE(p);
                }
                goto post_threads;
        }
        p = NULL;

        if (!req->oldptr) {
                /* overestimate by 5 procs */
                error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
                if (error)
                        goto post_threads;
        }

        for (n = 0; n < ALLPROC_HSIZE; ++n) {
                if (LIST_EMPTY(&allprocs[n]))
                        continue;
                lwkt_gettoken_shared(&proc_tokens[n]);
                LIST_FOREACH(p, &allprocs[n], p_list) {
                        /*
                         * Show a user only their processes.
                         */
                        if ((!ps_showallprocs) &&
                            (p->p_ucred == NULL ||
                             p_trespass(cr1, p->p_ucred))) {
                                continue;
                        }
                        /*
                         * Skip embryonic processes.
                         */
                        if (p->p_stat == SIDL)
                                continue;
                        /*
                         * TODO - make more efficient (see notes below).
                         * do by session.
                         */
                        switch (oid) {
                        case KERN_PROC_PGRP:
                                /* could do this by traversing pgrp */
                                if (p->p_pgrp == NULL ||
                                    p->p_pgrp->pg_id != (pid_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_TTY:
                                if ((p->p_flags & P_CONTROLT) == 0 ||
                                    p->p_session == NULL ||
                                    p->p_session->s_ttyp == NULL ||
                                    dev2udev(p->p_session->s_ttyp->t_dev) !=
                                     (udev_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_UID:
                                if (p->p_ucred == NULL ||
                                    p->p_ucred->cr_uid != (uid_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_RUID:
                                if (p->p_ucred == NULL ||
                                    p->p_ucred->cr_ruid != (uid_t)name[0])
                                        continue;
                                break;
                        }

                        if (!PRISON_CHECK(cr1, p->p_ucred))
                                continue;
                        PHOLD(p);
                        error = sysctl_out_proc(p, req, flags);
                        PRELE(p);
                        if (error) {
                                lwkt_reltoken(&proc_tokens[n]);
                                goto post_threads;
                        }
                }
                lwkt_reltoken(&proc_tokens[n]);
        }

        /*
         * Iterate over all active cpus and scan their thread list.  Start
         * with the next logical cpu and end with our original cpu.  We
         * migrate our own thread to each target cpu in order to safely scan
         * its thread list.  In the last loop we migrate back to our original
         * cpu.
         */
        origcpu = mycpu->gd_cpuid;
        if (!ps_showallthreads || jailed(cr1))
                goto post_threads;

        marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
        marker->td_flags = TDF_MARKER;
        error = 0;

        for (n = 1; n <= ncpus; ++n) {
                globaldata_t rgd;
                int nid;

                nid = (origcpu + n) % ncpus;
                if (CPUMASK_TESTBIT(smp_active_mask, nid) == 0)
                        continue;
                rgd = globaldata_find(nid);
                lwkt_setcpu_self(rgd);

                crit_enter();
                TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

                while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
                        TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
                        TAILQ_INSERT_BEFORE(td, marker, td_allq);
                        if (td->td_flags & TDF_MARKER)
                                continue;
                        if (td->td_proc)
                                continue;

                        lwkt_hold(td);
                        crit_exit();

                        switch (oid) {
                        case KERN_PROC_PGRP:
                        case KERN_PROC_TTY:
                        case KERN_PROC_UID:
                        case KERN_PROC_RUID:
                                break;
                        default:
                                error = sysctl_out_proc_kthread(td, req);
                                break;
                        }
                        lwkt_rele(td);
                        crit_enter();
                        if (error)
                                break;
                }
                TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
                crit_exit();

                if (error)
                        break;
        }

        /*
         * Userland scheduler expects us to return on the same cpu we
         * started on.
         */
        if (mycpu->gd_cpuid != origcpu)
                lwkt_setcpu_self(globaldata_find(origcpu));

        kfree(marker, M_TEMP);

post_threads:
        return (error);
}
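
#if 0
/*
 * Illustrative userland consumer (sketch, not kernel code): fetch the
 * kinfo_proc for a single pid through the MIB served by the handler
 * above.  Assumes the usual <sys/sysctl.h>/<sys/kinfo.h> userland
 * headers.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/kinfo.h>

struct kinfo_proc kp;
size_t len = sizeof(kp);
int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 1 };	/* pid 1 */

if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0) {
        /* kp now describes init */
}
#endif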

/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
        int *name = (int *)arg1;
        u_int namelen = arg2;
        struct proc *p;
        struct pargs *opa;
        struct pargs *pa;
        int error = 0;
        struct ucred *cr1 = curproc->p_ucred;

        if (namelen != 1)
                return (EINVAL);

        p = pfind((pid_t)name[0]);
        if (p == NULL)
                goto done;
        lwkt_gettoken(&p->p_token);

        if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
                goto done;

        if (req->newptr && curproc != p) {
                error = EPERM;
                goto done;
        }
        if (req->oldptr && (pa = p->p_args) != NULL) {
                refcount_acquire(&pa->ar_ref);
                error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
                if (refcount_release(&pa->ar_ref))
                        kfree(pa, M_PARGS);
        }
        if (req->newptr == NULL)
                goto done;

        if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
                goto done;
        }

        pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
        refcount_init(&pa->ar_ref, 1);
        pa->ar_length = req->newlen;
        error = SYSCTL_IN(req, pa->ar_args, req->newlen);
        if (error) {
                kfree(pa, M_PARGS);
                goto done;
        }

        /*
         * Replace p_args with the new pa.  p_args may have previously
         * been NULL.
         */
        opa = p->p_args;
        p->p_args = pa;

        if (opa) {
                KKASSERT(opa->ar_ref > 0);
                if (refcount_release(&opa->ar_ref)) {
                        kfree(opa, M_PARGS);
                        /* opa = NULL; */
                }
        }
done:
        if (p) {
                lwkt_reltoken(&p->p_token);
                PRELE(p);
        }
        return (error);
}

static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
        int *name = (int *)arg1;
        u_int namelen = arg2;
        struct proc *p;
        int error = 0;
        char *fullpath, *freepath;
        struct ucred *cr1 = curproc->p_ucred;

        if (namelen != 1)
                return (EINVAL);

        p = pfind((pid_t)name[0]);
        if (p == NULL)
                goto done;
        lwkt_gettoken_shared(&p->p_token);

        /*
         * If we are not allowed to see other args, we certainly shouldn't
         * get the cwd either.  Also check the usual trespassing.
         */
        if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
                goto done;

        if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
                struct nchandle nch;

                cache_copy(&p->p_fd->fd_ncdir, &nch);
                error = cache_fullpath(p, &nch, NULL,
                                       &fullpath, &freepath, 0);
                cache_drop(&nch);
                if (error)
                        goto done;
                error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
                kfree(freepath, M_TEMP);
        }

done:
        if (p) {
                lwkt_reltoken(&p->p_token);
                PRELE(p);
        }
        return (error);
}

/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
        pid_t *pidp = (pid_t *)arg1;
        unsigned int arglen = arg2;
        struct proc *p;
        struct vnode *vp;
        char *retbuf, *freebuf;
        int error = 0;

        if (arglen != 1)
                return (EINVAL);
        if (*pidp == -1) {      /* -1 means this process */
                p = curproc;
        } else {
                p = pfind(*pidp);
                if (p == NULL)
                        return (ESRCH);
        }

        vp = p->p_textvp;
        if (vp == NULL)
                goto done;

        vref(vp);
        error = vn_fullpath(p, vp, &retbuf, &freebuf, 0);
        vrele(vp);
        if (error)
                goto done;
        error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
        kfree(freebuf, M_TEMP);
done:
        if (*pidp != -1)
                PRELE(p);

        return (error);
}
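
#if 0
/*
 * Illustrative userland consumer (sketch, not kernel code): retrieve
 * our own executable path via kern.proc.pathname; a pid of -1 means
 * "this process".  MAXPATHLEN comes from <sys/param.h>.
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysctl.h>

char buf[MAXPATHLEN];
size_t len = sizeof(buf);
int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };

if (sysctl(mib, 4, buf, &len, NULL, 0) == 0) {
        /* buf holds the NUL-terminated executable path */
}
#endif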

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
	sysctl_kern_proc_args, "Process argument list");

SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD | CTLFLAG_ANYBODY,
	sysctl_kern_proc_cwd, "Process current working directory");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD,
	sysctl_kern_proc_pathname, "Process executable path");