/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_witness.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * Thread related storage.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "Number of times the per-process thread limit was hit");

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);
	td->td_syscalls = 0;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}
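
/*
 * Reader's note (added summary, not from the original file): the four
 * routines around here plug into the two-level UMA zone lifecycle.
 * thread_ctor()/thread_dtor() run on every allocation and free from
 * thread_zone, while thread_init()/thread_fini() run only when an item is
 * created from, or released back to, the backing VM.  A thread that is
 * freed and quickly reallocated therefore keeps its type-stable parts
 * (sleep queue, turnstile) and only repeats ctor work such as allocating
 * a tid.  Illustrative sketch, not part of the build:
 *
 *	td = uma_zalloc(thread_zone, M_WAITOK);	 ctor (+ init if fresh item)
 *	uma_zfree(thread_zone, td);		 dtor (fini deferred)
 */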

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	/* Free all OSD associated with this thread. */
	osd_thread_exit(td);

	EVENTHANDLER_INVOKE(thread_dtor, td);
	free_unr(tid_unrhdr, td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	td->td_kstack = 0;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process, link up all the structures and its initial
 * threads etc.
 * Called from:
 *	{arch}/{arch}/machdep.c   ia64_init(), init386() etc.
 *	proc_dtor() (should go away)
 *	proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	/* leave one number for thread0 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}
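
/*
 * Reader's note (added summary, not from the original file): the zombie
 * list exists because an exiting thread cannot free itself while it is
 * still running on the kernel stack that would be freed.  thread_exit()
 * instead parks the thread in the per-CPU deadthread slot; the next thread
 * to run on that CPU stashes it here, and thread_reap() later frees the
 * whole batch from a safe context.  Reusing td_slpq as the list linkage is
 * safe because a dead thread can no longer be on any sleep queue.
 */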

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	struct thread *td;

	thread_reap();	/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, 0)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_altkstack != 0)
		vm_thread_dispose_altkstack(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}
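
/*
 * Illustrative sketch (added; assumed caller shape, not from this file):
 * a caller such as thr_new() or kthread_add() typically pairs the two
 * routines above roughly like this, with copying and error handling
 * elided:
 *
 *	newtd = thread_alloc();
 *	if (newtd == NULL)
 *		return (ENOMEM);	no kernel stack available
 *	PROC_LOCK(p);
 *	thread_link(newtd, p);		join p_threads, bump p_numthreads
 *	PROC_UNLOCK(p);
 *	...
 *	thread_free(newtd);		only on a failure path
 */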

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	umtx_thread_exit(td);
	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);
	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	rucollect(&p->p_ru, &td->td_ru);
	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * exit1() clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					wakeup_swapper = thread_unsuspend_one(
					    p->p_singlethread);
					thread_unlock(p->p_singlethread);
					if (wakeup_swapper)
						kick_proc0();
				}
			}

			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting, but not through exit().
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	/* Save our tick information with both the thread and proc locked. */
	ruxagg(&p->p_rux, td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
#ifdef WITNESS
	witness_thread_exit(td);
#endif
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	/* Wait for any remaining threads to exit cpu_throw(). */
	while (p->p_exitthreads)
		sched_relinquish(curthread);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
	p->p_flag &= ~P_HADTHREADS;
}

/*
 * Called from:
 *	thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}
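
/*
 * Reader's note (added summary, not from the original file): the
 * single-threading modes accepted by thread_single() below are defined in
 * <sys/proc.h>; roughly:
 *
 *	SINGLE_NO_EXIT	 - suspend every other thread wherever it is.
 *	SINGLE_EXIT	 - make every other thread exit (the exit1() path);
 *			   the survivor reverts via thread_unthread().
 *	SINGLE_BOUNDARY	 - park other threads at the user boundary so that
 *			   they can first finish their in-kernel work.
 */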

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining, wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (TD_IS_SUSPENDED(td2))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						wakeup_swapper |=
						    thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper |=
						    sleepq_abort(td2, ERESTART);
					break;
				default:
					if (TD_IS_SUSPENDED(td2)) {
						thread_unlock(td2);
						continue;
					}
					/*
					 * maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads; was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_switch(td);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}
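
/*
 * Illustrative sketch (added; assumed caller shape, not from this file):
 * from a caller's point of view, thread_single() is used roughly as
 * follows, with exit1() being one such caller:
 *
 *	PROC_LOCK(p);
 *	if (thread_single(SINGLE_EXIT))
 *		...abort, another thread is already single-threading...
 *	now the sole surviving thread; the proc lock is still held
 */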

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we go to the user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals. */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		PROC_SLOCK(p);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just exit.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper =
				    thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0)
			p->p_boundary_count--;
	}
	return (0);
}
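
/*
 * Illustrative sketch (added; assumed caller shape, not from this file):
 * a common call site for thread_suspend_check() is the AST path on return
 * to user mode, shaped roughly like:
 *
 *	if (td->td_flags & TDF_NEEDSUSPCHK) {
 *		PROC_LOCK(p);
 *		thread_suspend_check(0);	may suspend or exit here
 *		PROC_UNLOCK(p);
 *	}
 */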

/*
 * Suspend the calling thread, record it as stopped, and switch away.
 * Called with both the proc lock and the proc slock held.
 */
void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

/*
 * Mark a thread as suspended. The thread lock and proc slock must be held.
 */
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

/*
 * Clear a thread's suspension and make it runnable again; returns
 * non-zero if the swapper must be woken (see setrunnable()).
 */
int
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;
	int wakeup_swapper;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	wakeup_swapper = 0;
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
	if (wakeup_swapper)
		kick_proc0();
}
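
/*
 * Reader's note (added summary, not from the original file): the
 * wakeup_swapper convention used throughout this file works as follows.
 * thread_unsuspend_one() ends in setrunnable(), which returns non-zero
 * when the thread's stack is swapped out and the swapper must bring it
 * back in. Callers accumulate those results and make a single
 * kick_proc0() call once the thread locks have been released.
 */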

/*
 * End the single threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	wakeup_swapper = 0;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				wakeup_swapper |= thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Find the thread with the given thread id in process p.
 * The proc lock must be held; NULL is returned if no match is found.
 */
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}