/*
 * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
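 *
 * For example, code that touches the local cpu's run queue only brackets
 * the operation with a critical section, while scheduling a thread owned
 * by another cpu is handed off via an async IPI.  A minimal sketch of the
 * two patterns used throughout this file (the local case assumes
 * td->td_gd == mycpu):
 *
 *	crit_enter();
 *	_lwkt_enqueue(td);
 *	crit_exit();
 *
 *	lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);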
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/kinfo.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/spinlock.h>
#include <sys/ktr.h>
#include <sys/indefinite.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/indefinite2.h>

#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/clock.h>

#define LOOPMASK

#if !defined(KTR_CTXSW)
#define KTR_CTXSW	KTR_ALL
#endif
KTR_INFO_MASTER(ctxsw);
KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p", int cpu, struct thread *td);
KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p", int cpu, struct thread *td);
KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s", struct thread *td, char *comm);
KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = <dead>", struct thread *td);

static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");

#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
#ifdef DEBUG_LWKT_THREAD
static int64_t switch_count = 0;
static int64_t preempt_hit = 0;
static int64_t preempt_miss = 0;
static int64_t preempt_weird = 0;
#endif
static int lwkt_use_spin_port;
__read_mostly static struct objcache *thread_cache;
int cpu_mwait_spin = 0;

static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
static void lwkt_setcpu_remote(void *arg);

/*
 * We can make all thread ports use the spin backend instead of the thread
 * backend.  This should only be set to debug the spin backend.
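 *
 * lwkt.use_spin_port is registered as a boot-time tunable via the
 * TUNABLE_INT() below, so it would typically be set from the boot
 * loader, e.g. (illustrative):
 *
 *	lwkt.use_spin_port=1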
 */
TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0,
	   "Panic if attempting to switch lwkt's while mastering cpusync");
#endif
#ifdef DEBUG_LWKT_THREAD
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0,
	    "Number of switched threads");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0,
	    "Successful preemption events");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0,
	    "Failed preemption events");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0,
	    "Number of preempted threads.");
#endif
extern int lwkt_sched_debug;
int lwkt_sched_debug = 0;
SYSCTL_INT(_lwkt, OID_AUTO, sched_debug, CTLFLAG_RW,
	   &lwkt_sched_debug, 0, "Scheduler debug");
__read_mostly static u_int lwkt_spin_loops = 10;
SYSCTL_UINT(_lwkt, OID_AUTO, spin_loops, CTLFLAG_RW,
	    &lwkt_spin_loops, 0, "Scheduler spin loops until sorted decon");
__read_mostly static int preempt_enable = 1;
SYSCTL_INT(_lwkt, OID_AUTO, preempt_enable, CTLFLAG_RW,
	   &preempt_enable, 0, "Enable preemption");
static int lwkt_cache_threads = 0;
SYSCTL_INT(_lwkt, OID_AUTO, cache_threads, CTLFLAG_RD,
	   &lwkt_cache_threads, 0, "thread+kstack cache");

/*
 * These helper procedures handle the runq; they can only be called from
 * within a critical section.
 *
 * WARNING!  Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueuing and dequeuing only occur on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
	if (td->td_flags & TDF_RUNQ) {
		struct globaldata *gd = td->td_gd;

		td->td_flags &= ~TDF_RUNQ;
		TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
		--gd->gd_tdrunqcount;
		if (TAILQ_FIRST(&gd->gd_tdrunq) == NULL)
			atomic_clear_int(&gd->gd_reqflags, RQF_RUNNING);
	}
}

/*
 * Priority enqueue.
 *
 * There are a limited number of lwkt threads runnable since user
 * processes only schedule one at a time per cpu.  However, there can
 * be many user processes in kernel mode exiting from a tsleep() which
 * become runnable.
 *
 * We scan the queue in both directions to help deal with degenerate
 * situations when hundreds or thousands (or more) threads are runnable.
 *
 * NOTE: lwkt_schedulerclock() will force a round-robin based on td_pri and
 *	 will ignore user priority.  This is to ensure that user threads in
 *	 kernel mode get cpu at some point regardless of what the user
 *	 scheduler thinks.
 */
static __inline
void
_lwkt_enqueue(thread_t td)
{
	thread_t xtd;	/* forward scan */
	thread_t rtd;	/* reverse scan */

	if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
		struct globaldata *gd = td->td_gd;

		td->td_flags |= TDF_RUNQ;
		xtd = TAILQ_FIRST(&gd->gd_tdrunq);
		if (xtd == NULL) {
			TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
			atomic_set_int(&gd->gd_reqflags, RQF_RUNNING);
		} else {
			/*
			 * NOTE: td_upri - higher numbers are more desirable,
			 *	 same sense as td_pri (typically reversed
			 *	 from lwp_upri).
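			 *
			 *	 For example (illustrative): given queued
			 *	 threads all at the same td_pri, a new
			 *	 thread with a higher td_upri is inserted
			 *	 in front of them, while one whose td_upri
			 *	 matches exactly ends up at the tail
			 *	 (round-robin), per the loop below.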
			 *
			 * In the equal priority case we want the best
			 * selection at the beginning so the less desirable
			 * selections know that they have to
			 * setrunqueue/go-to-another-cpu, even though it
			 * means switching back to the 'best' selection.
			 * This also avoids degenerate situations when many
			 * threads are runnable or waking up at the same time.
			 *
			 * If upri matches exactly place at end/round-robin.
			 */
			rtd = TAILQ_LAST(&gd->gd_tdrunq, lwkt_queue);

			while (xtd &&
			       (xtd->td_pri > td->td_pri ||
				(xtd->td_pri == td->td_pri &&
				 xtd->td_upri >= td->td_upri))) {
				xtd = TAILQ_NEXT(xtd, td_threadq);

				/*
				 * Doing a reverse scan at the same time is
				 * an optimization for the
				 * insert-closer-to-tail case that avoids
				 * having to scan the entire list.  This
				 * situation can occur when thousands of
				 * threads are woken up at the same time.
				 */
				if (rtd->td_pri > td->td_pri ||
				    (rtd->td_pri == td->td_pri &&
				     rtd->td_upri >= td->td_upri)) {
					TAILQ_INSERT_AFTER(&gd->gd_tdrunq,
							   rtd, td, td_threadq);
					goto skip;
				}
				rtd = TAILQ_PREV(rtd, lwkt_queue, td_threadq);
			}
			if (xtd)
				TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
			else
				TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td,
						  td_threadq);
		}
skip:
		++gd->gd_tdrunqcount;

		/*
		 * Request a LWKT reschedule if we are now at the head of
		 * the queue.
		 */
		if (TAILQ_FIRST(&gd->gd_tdrunq) == td)
			need_lwkt_resched();
	}
}

static boolean_t
_lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
{
	struct thread *td = (struct thread *)obj;

	td->td_kstack = NULL;
	td->td_kstack_size = 0;
	td->td_flags = TDF_ALLOCATED_THREAD;
	td->td_mpflags = 0;
	return (1);
}

static void
_lwkt_thread_dtor(void *obj, void *privdata)
{
	struct thread *td = (struct thread *)obj;

	KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
		("_lwkt_thread_dtor: not allocated from objcache"));
	KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
		td->td_kstack_size > 0,
		("_lwkt_thread_dtor: corrupted stack"));
	kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
	td->td_kstack = NULL;
	td->td_flags = 0;
}

/*
 * Initialize the LWKT subsystem.
 *
 * Nominally cache up to 32 thread + kstack structures.  Cache more on
 * systems with a lot of cpu cores.
 */
static void
lwkt_init(void)
{
	TUNABLE_INT("lwkt.cache_threads", &lwkt_cache_threads);
	if (lwkt_cache_threads == 0) {
		lwkt_cache_threads = ncpus * 4;
		if (lwkt_cache_threads < 32)
			lwkt_cache_threads = 32;
	}
	thread_cache = objcache_create_mbacked(
				M_THREAD, sizeof(struct thread),
				0, lwkt_cache_threads,
				_lwkt_thread_ctor, _lwkt_thread_dtor, NULL);
}
SYSINIT(lwkt_init, SI_BOOT2_LWKT_INIT, SI_ORDER_FIRST, lwkt_init, NULL);

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * purpose.
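 *
 * A minimal usage sketch, mirroring the pattern lwkt_yield() uses below
 * (requeue ourselves, then switch away so other runnable threads get
 * the cpu):
 *
 *	lwkt_schedule_self(curthread);
 *	lwkt_switch();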
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
	KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
	crit_enter_quick(td);
	KASSERT(td != &td->td_gd->gd_idlethread,
		("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
	KKASSERT(td->td_lwp == NULL ||
		 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
	_lwkt_enqueue(td);
	crit_exit_quick(td);
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(thread_t td)
{
	crit_enter_quick(td);
	_lwkt_dequeue(td);
	crit_exit_quick(td);
}

/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
	TAILQ_INIT(&gd->gd_tdrunq);
	TAILQ_INIT(&gd->gd_tdallq);
	lockinit(&gd->gd_sysctllock, "sysctl", 0, LK_CANRECURSE);
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 a cpu is chosen round-robin via a static rotator (see the code below).
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
{
	static int cpu_rotator;
	globaldata_t gd = mycpu;
	void *stack;

	/*
	 * If static thread storage is not supplied allocate a thread.
	 * Reuse a cached free thread if possible.  gd_freetd is used to
	 * keep an exiting thread intact through the exit.
	 */
	if (td == NULL) {
		crit_enter_gd(gd);
		if ((td = gd->gd_freetd) != NULL) {
			KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
						  TDF_RUNQ)) == 0);
			gd->gd_freetd = NULL;
		} else {
			td = objcache_get(thread_cache, M_WAITOK);
			KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
						  TDF_RUNQ)) == 0);
		}
		crit_exit_gd(gd);
		KASSERT((td->td_flags &
			 (TDF_ALLOCATED_THREAD|TDF_RUNNING|
			  TDF_PREEMPT_LOCK)) == TDF_ALLOCATED_THREAD,
			("lwkt_alloc_thread: corrupted td flags 0x%X",
			 td->td_flags));
		flags |= td->td_flags &
			 (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
	}

	/*
	 * Try to reuse cached stack.
	 */
	if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
		if (flags & TDF_ALLOCATED_STACK) {
			kmem_free(kernel_map, (vm_offset_t)stack,
				  td->td_kstack_size);
			stack = NULL;
		}
	}
	if (stack == NULL) {
		if (cpu < 0) {
			stack = (void *)kmem_alloc_stack(kernel_map,
							 stksize, 0);
		} else {
			stack = (void *)kmem_alloc_stack(kernel_map, stksize,
							 KM_CPU(cpu));
		}
		flags |= TDF_ALLOCATED_STACK;
	}
	if (cpu < 0) {
		cpu = ++cpu_rotator;
		cpu_ccfence();
		cpu = (uint32_t)cpu % (uint32_t)ncpus;
	}
	lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
	return(td);
}

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.
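 * (For example, higher level code might call lwkt_setpri_initial(),
 * defined later in this file, before the thread is first scheduled;
 * illustrative.)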
 * This function may send an IPI message when the requested cpu is not
 * the current cpu and consequently gd_tdallq may not be initialized
 * synchronously from the point of view of the originating cpu.
 *
 * NOTE! We have to be careful with regard to creating threads for other
 * cpus if SMP has not yet been activated.
 */
static void
lwkt_init_thread_remote(void *arg)
{
	thread_t td = arg;

	/*
	 * Protected by critical section held by IPI dispatch
	 */
	TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

/*
 * lwkt core thread structural initialization.
 *
 * NOTE: All threads are initialized as mpsafe threads.
 */
void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
		 struct globaldata *gd)
{
	globaldata_t mygd = mycpu;

	bzero(td, sizeof(struct thread));
	td->td_kstack = stack;
	td->td_kstack_size = stksize;
	td->td_flags = flags;
	td->td_mpflags = 0;
	td->td_type = TD_TYPE_GENERIC;
	td->td_gd = gd;
	td->td_pri = TDPRI_KERN_DAEMON;
	td->td_critcount = 1;
	td->td_toks_have = NULL;
	td->td_toks_stop = &td->td_toks_base;
	if (lwkt_use_spin_port || (flags & TDF_FORCE_SPINPORT)) {
		lwkt_initport_spin(&td->td_msgport, td,
				   (flags & TDF_FIXEDCPU) ? TRUE : FALSE);
	} else {
		lwkt_initport_thread(&td->td_msgport, td);
	}
	pmap_init_thread(td);
	/*
	 * Normally initializing a thread for a remote cpu requires sending
	 * an IPI.  However, the idlethread is set up before the other cpus
	 * are activated so we have to treat it as a special case.  XXX
	 * manipulation of gd_tdallq requires the BGL.
	 */
	if (gd == mygd || td == &gd->gd_idlethread) {
		crit_enter_gd(mygd);
		TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
		crit_exit_gd(mygd);
	} else {
		lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
	}
	dsched_enter_thread(td);
}

void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
	__va_list va;

	__va_start(va, ctl);
	kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
	__va_end(va);
	KTR_LOG(ctxsw_newtd, td, td->td_comm);
}

/*
 * Prevent the thread from getting destroyed.  Note that unlike PHOLD/PRELE
 * this does not prevent the thread from migrating to another cpu so the
 * gd_tdallq state is not protected by this.
 */
void
lwkt_hold(thread_t td)
{
	atomic_add_int(&td->td_refs, 1);
}

void
lwkt_rele(thread_t td)
{
	KKASSERT(td->td_refs > 0);
	atomic_add_int(&td->td_refs, -1);
}

void
lwkt_free_thread(thread_t td)
{
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK |
				  TDF_RUNQ | TDF_TSLEEPQ)) == 0);
	if (td->td_flags & TDF_ALLOCATED_THREAD) {
		objcache_put(thread_cache, td);
	} else if (td->td_flags & TDF_ALLOCATED_STACK) {
		/* client-allocated struct with internally allocated stack */
		KASSERT(td->td_kstack && td->td_kstack_size > 0,
			("lwkt_free_thread: corrupted stack"));
		kmem_free(kernel_map, (vm_offset_t)td->td_kstack,
			  td->td_kstack_size);
		td->td_kstack = NULL;
		td->td_kstack_size = 0;
	}

	KTR_LOG(ctxsw_deadtd, td);
}


/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
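 *
 * The canonical blocking pattern built on top of this primitive is to
 * deschedule ourselves and then switch away (a sketch, not the literal
 * tsleep() code):
 *
 *	crit_enter();
 *	lwkt_deschedule_self(td);
 *	... record the wait condition somewhere ...
 *	lwkt_switch();
 *	crit_exit();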
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
 * is not called by the current thread in the preemption case, only when
 * the preempting thread blocks (in order to return to the original thread).
 *
 * SPECIAL NOTE ON SWITCH ATOMICITY: Certain operations such as thread
 * migration and tsleep deschedule the current lwkt thread and call
 * lwkt_switch().  In particular, the target cpu of the migration fully
 * expects the thread to become non-runnable and can deadlock against
 * cpusync operations if we run any IPIs prior to switching the thread out.
 *
 * WE MUST BE VERY CAREFUL NOT TO RUN SPLZ DIRECTLY OR INDIRECTLY IF
 * THE CURRENT THREAD HAS BEEN DESCHEDULED!
 */
void
lwkt_switch(void)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	thread_t ntd;
	thread_t xtd;
	int upri;
#ifdef LOOPMASK
	uint64_t tsc_base = rdtsc();
#endif

	KKASSERT(gd->gd_processing_ipiq == 0);
	KKASSERT(td->td_flags & TDF_RUNNING);

	/*
	 * Switching from within a 'fast' (non thread switched) interrupt
	 * or IPI is illegal.  However, we may have to do it anyway if we
	 * hit a fatal kernel trap or we have panicked.
	 *
	 * If this case occurs save and restore the interrupt nesting level.
	 */
	if (gd->gd_intr_nesting_level) {
		int savegdnest;
		int savegdtrap;

		if (gd->gd_trap_nesting_level == 0 && panic_cpu_gd != mycpu) {
			panic("lwkt_switch: Attempt to switch from a "
			      "fast interrupt, ipi, or hard code section, "
			      "td %p\n",
			      td);
		} else {
			savegdnest = gd->gd_intr_nesting_level;
			savegdtrap = gd->gd_trap_nesting_level;
			gd->gd_intr_nesting_level = 0;
			gd->gd_trap_nesting_level = 0;
			if ((td->td_flags & TDF_PANICWARN) == 0) {
				td->td_flags |= TDF_PANICWARN;
				kprintf("Warning: thread switch from "
					"interrupt, IPI, "
					"or hard code section.\n"
					"thread %p (%s)\n", td, td->td_comm);
				print_backtrace(-1);
			}
			lwkt_switch();
			gd->gd_intr_nesting_level = savegdnest;
			gd->gd_trap_nesting_level = savegdtrap;
			return;
		}
	}

	/*
	 * Release our current user process designation if we are blocking
	 * or if a user reschedule was requested.
	 *
	 * NOTE: This function is NOT called if we are switching into or
	 *	 returning from a preemption.
	 *
	 * NOTE: Releasing our current user process designation may cause
	 *	 it to be assigned to another thread, which in turn will
	 *	 cause us to block in the usched acquire code when we attempt
	 *	 to return to userland.
	 *
	 * NOTE: On SMP systems this can be very nasty when heavy token
	 *	 contention is present so we want to be careful not to
	 *	 release the designation gratuitously.
	 */
	if (td->td_release &&
	    (user_resched_wanted() || (td->td_flags & TDF_RUNQ) == 0)) {
		td->td_release(td);
	}

	/*
	 * Release all tokens.
	 * Once we do this we must remain in the critical section and
	 * cannot run IPIs or other interrupts until we switch away,
	 * because they may implode if they try to get a token using our
	 * thread context.
	 */
	crit_enter_gd(gd);
	if (TD_TOKS_HELD(td))
		lwkt_relalltokens(td);

	/*
	 * We had better not be holding any spin locks, but don't get into
	 * an endless panic loop.
	 */
	KASSERT(gd->gd_spinlocks == 0 || panicstr != NULL,
		("lwkt_switch: still holding %d exclusive spinlocks!",
		 gd->gd_spinlocks));

#ifdef INVARIANTS
	if (td->td_cscount) {
		kprintf("Diagnostic: attempt to switch while mastering "
			"cpusync: %p\n", td);
		if (panic_on_cscount)
			panic("switching while mastering cpusync");
	}
#endif

	/*
	 * If we had preempted another thread on this cpu, resume the
	 * preempted thread.  This occurs transparently, whether the
	 * preempted thread was scheduled or not (it may have been
	 * preempted after descheduling itself).
	 *
	 * We have to setup the MP lock for the original thread after backing
	 * out the adjustment that was made to curthread when the original
	 * was preempted.
	 */
	if ((ntd = td->td_preempted) != NULL) {
		KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
		ntd->td_flags |= TDF_PREEMPT_DONE;
		ntd->td_contended = 0;	/* reset contended */

		/*
		 * The interrupt may have woken a thread up, we need to
		 * properly set the reschedule flag if the originally
		 * interrupted thread is at a lower priority.
		 *
		 * NOTE: The interrupt may not have descheduled ntd.
		 *
		 * NOTE: We do not reschedule if there are no threads on the
		 *	 runq.  (ntd could be the idlethread).
		 */
		xtd = TAILQ_FIRST(&gd->gd_tdrunq);
		if (xtd && xtd != ntd)
			need_lwkt_resched();
		goto havethread_preempted;
	}

	/*
	 * Figure out switch target.  If we cannot switch to our desired
	 * target look for a thread that we can switch to.
	 *
	 * NOTE! The limited spin loop and related parameters are extremely
	 *	 important for system performance, particularly for pipes and
	 *	 concurrent conflicting VM faults.
	 */
	clear_lwkt_resched();
	ntd = TAILQ_FIRST(&gd->gd_tdrunq);

	if (ntd) {
		do {
			if (TD_TOKS_NOT_HELD(ntd) ||
			    lwkt_getalltokens(ntd, (ntd->td_contended >
						    lwkt_spin_loops)))
			{
				goto havethread;
			}
			++ntd->td_contended;	/* overflow ok */
			if (gd->gd_indefinite.type == 0)
				indefinite_init(&gd->gd_indefinite,
						NULL, 0, 't');
#ifdef LOOPMASK
			if (tsc_frequency &&
			    rdtsc() - tsc_base > tsc_frequency) {
				kprintf("lwkt_switch: excessive contended %d "
					"thread %p\n", ntd->td_contended, ntd);
				tsc_base = rdtsc();
			}
#endif
		} while (ntd->td_contended < (lwkt_spin_loops >> 1));
		upri = ntd->td_upri;

		/*
		 * Bleh, the thread we wanted to switch to has a contended
		 * token.  See if we can switch to another thread.
		 *
		 * We generally don't want to do this because it represents
		 * a priority inversion, but contending tokens on the same
		 * cpu can cause real problems if we don't, now that we have
		 * an exclusive priority mechanism over shared for tokens.
		 *
		 * The solution is to allow threads with pending tokens to
		 * compete for them (a lower priority thread will get less
		 * cpu once it returns from the kernel anyway).  If a thread
		 * does not have any contending tokens, we go by td_pri and
		 * upri.
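		 *
		 * Concretely (illustrative): a token-free candidate below
		 * TDPRI_KERN_LPSCHED whose td_upri is below the best upri
		 * seen so far is skipped outright, while a candidate still
		 * holding contended tokens is always given a chance to
		 * acquire them in the loop below.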
		 */
		while ((ntd = TAILQ_NEXT(ntd, td_threadq)) != NULL) {
			if (TD_TOKS_NOT_HELD(ntd) &&
			    ntd->td_pri < TDPRI_KERN_LPSCHED &&
			    upri > ntd->td_upri) {
				continue;
			}
			if (upri < ntd->td_upri)
				upri = ntd->td_upri;

			/*
			 * Try this one.
			 */
			if (TD_TOKS_NOT_HELD(ntd) ||
			    lwkt_getalltokens(ntd, (ntd->td_contended >
						    lwkt_spin_loops))) {
				goto havethread;
			}
			++ntd->td_contended;	/* overflow ok */
		}

		/*
		 * Fall through, switch to idle thread to get us out of the
		 * current context.  Since we were contended, prevent HLT
		 * by flagging a LWKT reschedule.
		 */
		need_lwkt_resched();
	}

	/*
	 * We either contended on ntd or the runq is empty.  We must switch
	 * through the idle thread to get out of the current context.
	 */
	ntd = &gd->gd_idlethread;
	if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
		ASSERT_NO_TOKENS_HELD(ntd);
	cpu_time.cp_msg[0] = 0;
	goto haveidle;

havethread:
	/*
	 * Clear gd_idle_repeat when doing a normal switch to a non-idle
	 * thread.
	 */
	ntd->td_wmesg = NULL;
	ntd->td_contended = 0;	/* reset once scheduled */
	++gd->gd_cnt.v_swtch;
	gd->gd_idle_repeat = 0;

	/*
	 * If we were busy waiting record final disposition
	 */
	if (gd->gd_indefinite.type)
		indefinite_done(&gd->gd_indefinite);

havethread_preempted:
	/*
	 * If the new target does not need the MP lock and we are holding it,
	 * release the MP lock.  If the new target requires the MP lock we
	 * have already acquired it for the target.
	 */
	;
haveidle:
	KASSERT(ntd->td_critcount,
		("priority problem in lwkt_switch %d %d",
		 td->td_critcount, ntd->td_critcount));

	if (td != ntd) {
		/*
		 * Execute the actual thread switch operation.  This function
		 * returns to the current thread and returns the previous
		 * thread (which may be different from the thread we
		 * switched to).
		 *
		 * We are responsible for marking ntd as TDF_RUNNING.
		 */
		KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
#ifdef DEBUG_LWKT_THREAD
		++switch_count;
#endif
		KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd);
		ntd->td_flags |= TDF_RUNNING;
		lwkt_switch_return(td->td_switch(ntd));
		/* ntd invalid, td_switch() can return a different thread_t */
	}

	/*
	 * catch-all.  XXX is this strictly needed?
	 */
	splz_check();

	/* NOTE: current cpu may have changed after switch */
	crit_exit_quick(td);
}

/*
 * Called by assembly in the td_switch (thread restore path) for thread
 * bootstrap cases which do not 'return' to lwkt_switch().
 */
void
lwkt_switch_return(thread_t otd)
{
	globaldata_t rgd;
#ifdef LOOPMASK
	uint64_t tsc_base = rdtsc();
#endif
	int exiting;

	exiting = otd->td_flags & TDF_EXITING;
	cpu_ccfence();

	/*
	 * Check if otd was migrating.  Now that we are on ntd we can finish
	 * up the migration.  This is a bit messy but it is the only place
	 * where td is known to be fully descheduled.
	 *
	 * We can only activate the migration if otd was migrating but not
	 * held on the cpu due to a preemption chain.  We still have to
	 * clear TDF_RUNNING on the old thread either way.
	 *
	 * We are responsible for clearing the previously running thread's
	 * TDF_RUNNING.
	 */
	if ((rgd = otd->td_migrate_gd) != NULL &&
	    (otd->td_flags & TDF_PREEMPT_LOCK) == 0) {
		KKASSERT((otd->td_flags & (TDF_MIGRATING | TDF_RUNNING)) ==
			 (TDF_MIGRATING | TDF_RUNNING));
		otd->td_migrate_gd = NULL;
		otd->td_flags &= ~TDF_RUNNING;
		lwkt_send_ipiq(rgd, lwkt_setcpu_remote, otd);
	} else {
		otd->td_flags &= ~TDF_RUNNING;
	}

	/*
	 * Final exit validations (see lwp_wait()).  Note that otd becomes
	 * invalid the *instant* we set TDF_MP_EXITSIG.
	 *
	 * Use the EXITING status loaded from before we clear TDF_RUNNING,
	 * because if it is not set otd becomes invalid the instant we clear
	 * TDF_RUNNING on it (otherwise, if the system is fast enough, we
	 * might 'steal' TDF_EXITING from another switch-return!).
	 */
	while (exiting) {
		u_int mpflags;

		mpflags = otd->td_mpflags;
		cpu_ccfence();

		if (mpflags & TDF_MP_EXITWAIT) {
			/*
			 * A reaper is already waiting in lwp_wait(), wake
			 * it up after posting the exit signal.
			 */
			if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
					      mpflags | TDF_MP_EXITSIG)) {
				wakeup(otd);
				break;
			}
		} else {
			/*
			 * No waiter yet, just post the exit signal.  There
			 * is nothing sleeping on otd so no wakeup() is
			 * needed in this branch.
			 */
			if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
					      mpflags | TDF_MP_EXITSIG)) {
				break;
			}
		}

#ifdef LOOPMASK
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			kprintf("lwkt_switch_return: excessive TDF_EXITING "
				"thread %p\n", otd);
			tsc_base = rdtsc();
		}
#endif
	}
}

/*
 * Request that the target thread preempt the current thread.  Preemption
 * can only occur if:
 *
 *	- Our critical section is the one that we were called with
 *	- The relative priority of the target thread is higher
 *	- The target is not excessively interrupt-nested via td_nest_count
 *	- The target thread holds no tokens.
 *	- The target thread is not already scheduled and belongs to the
 *	  current cpu.
 *	- The current thread is not holding any spin-locks.
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critcount is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * Preemption is typically limited to interrupt threads.
 *
 * Operation works in a fairly straightforward manner.  The normal
 * scheduling code is bypassed and we switch directly to the target
 * thread.  When the target thread attempts to block or switch away
 * code at the base of lwkt_switch() will switch directly back to our
 * thread.  Our thread is able to retain whatever tokens it holds and
 * if the target needs one of them the target will switch back to us
 * and reschedule itself normally.
 */
void
lwkt_preempt(thread_t ntd, int critcount)
{
	struct globaldata *gd = mycpu;
	thread_t xtd;
	thread_t td;
	int save_gd_intr_nesting_level;

	/*
	 * The caller has put us in a critical section.  We can only preempt
	 * if the caller of the caller was not in a critical section
	 * (basically a local interrupt), as determined by the 'critcount'
	 * parameter.  We also can't preempt if the caller is holding any
	 * spinlocks (even if he isn't in a critical section).  This also
	 * handles the tokens test.
	 *
	 * YYY The target thread must be in a critical section (else it must
	 * inherit our critical section?  I dunno yet).
	 */
	KASSERT(ntd->td_critcount, ("BADCRIT0 %d", ntd->td_pri));

	td = gd->gd_curthread;
	if (preempt_enable == 0) {
#ifdef DEBUG_LWKT_THREAD
		++preempt_miss;
#endif
		return;
	}
	if (ntd->td_pri <= td->td_pri) {
#ifdef DEBUG_LWKT_THREAD
		++preempt_miss;
#endif
		return;
	}
	if (td->td_critcount > critcount) {
#ifdef DEBUG_LWKT_THREAD
		++preempt_miss;
#endif
		return;
	}
	if (td->td_nest_count >= 2) {
#ifdef DEBUG_LWKT_THREAD
		++preempt_miss;
#endif
		return;
	}
	if (td->td_cscount) {
#ifdef DEBUG_LWKT_THREAD
		++preempt_miss;
#endif
		return;
	}
	if (ntd->td_gd != gd) {
#ifdef DEBUG_LWKT_THREAD
		++preempt_miss;
#endif
		return;
	}

	/*
	 * We don't have to check spinlocks here as they will also bump
	 * td_critcount.
	 *
	 * Do not try to preempt if the target thread is holding any tokens.
	 * We could try to acquire the tokens but this case is so rare there
	 * is no need to support it.
	 */
	KKASSERT(gd->gd_spinlocks == 0);

	if (TD_TOKS_HELD(ntd)) {
#ifdef DEBUG_LWKT_THREAD
		++preempt_miss;
#endif
		return;
	}
	if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
#ifdef DEBUG_LWKT_THREAD
		++preempt_weird;
#endif
		return;
	}
	if (ntd->td_preempted) {
#ifdef DEBUG_LWKT_THREAD
		++preempt_hit;
#endif
		return;
	}
	KKASSERT(gd->gd_processing_ipiq == 0);

	/*
	 * Since we are able to preempt the current thread, there is no need
	 * to call need_lwkt_resched().
	 *
	 * We must temporarily clear gd_intr_nesting_level around the switch
	 * since switchouts from the target thread are allowed (they will
	 * just return to our thread), and since the target thread has its
	 * own stack.
	 *
	 * A preemption must switch back to the original thread, assert the
	 * case.
	 */
#ifdef DEBUG_LWKT_THREAD
	++preempt_hit;
#endif
	ntd->td_preempted = td;
	td->td_flags |= TDF_PREEMPT_LOCK;
	KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd);
	save_gd_intr_nesting_level = gd->gd_intr_nesting_level;
	gd->gd_intr_nesting_level = 0;

	KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
	ntd->td_flags |= TDF_RUNNING;
	xtd = td->td_switch(ntd);
	KKASSERT(xtd == ntd);
	lwkt_switch_return(xtd);
	gd->gd_intr_nesting_level = save_gd_intr_nesting_level;

	KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
	ntd->td_preempted = NULL;
	td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}

/*
 * Conditionally call splz() if gd_reqflags indicates work is pending.
 * This will work inside a critical section but not inside a hard code
 * section.
 *
 * (self contained on a per cpu basis)
 */
void
splz_check(void)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) &&
	    gd->gd_intr_nesting_level == 0 &&
	    td->td_nest_count < 2)
	{
		splz();
	}
}

/*
 * This version is integrated into crit_exit, reqflags has already
 * been tested but td_critcount has not.
 *
 * We only want to execute the splz() on the 1->0 transition of
 * critcount and not in a hard code section or if too deeply nested.
 *
 * NOTE: gd->gd_spinlocks is implied to be 0 when td_critcount is 0.
 */
void
lwkt_maybe_splz(thread_t td)
{
	globaldata_t gd = td->td_gd;

	if (td->td_critcount == 0 &&
	    gd->gd_intr_nesting_level == 0 &&
	    td->td_nest_count < 2)
	{
		splz();
	}
}

/*
 * Drivers which set up processing co-threads can call this function to
 * run the co-thread at a higher priority and to allow it to preempt
 * normal threads.
 */
void
lwkt_set_interrupt_support_thread(void)
{
	thread_t td = curthread;

	lwkt_setpri_self(TDPRI_INT_SUPPORT);
	td->td_flags |= TDF_INTTHREAD;
	td->td_preemptable = lwkt_preempt;
}


/*
 * This function is used to negotiate a passive release of the current
 * process/lwp designation with the user scheduler, allowing the user
 * scheduler to schedule another user thread.  The related kernel thread
 * (curthread) continues running in the released state.
 */
void
lwkt_passive_release(struct thread *td)
{
	struct lwp *lp = td->td_lwp;

	td->td_release = NULL;
	lwkt_setpri_self(TDPRI_KERN_USER);

	lp->lwp_proc->p_usched->release_curproc(lp);
}


/*
 * This implements a LWKT yield, allowing a kernel thread to yield to other
 * kernel threads at the same or higher priority.  This function can be
 * called in a tight loop and will typically only yield once per tick.
 *
 * Most kernel threads run at the same priority in order to allow equal
 * sharing.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	/*
	 * This should never be called with spinlocks held, but there is a
	 * path via ACPI where it might happen.
	 */
	if (gd->gd_spinlocks)
		return;

	/*
	 * Safe to call splz if we are not too-heavily nested.
	 */
	if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
		splz();

	/*
	 * Caller allows switching
	 */
	if (lwkt_resched_wanted()) {
		atomic_set_int(&td->td_mpflags, TDF_MP_DIDYIELD);
		lwkt_schedule_self(td);
		lwkt_switch();
	}
}

/*
 * The quick version processes pending interrupts and higher-priority
 * LWKT threads but will not round-robin same-priority LWKT threads.
 *
 * When called while attempting to return to userland the only same-pri
 * threads are the ones which have already tried to become the current
 * user process.
 */
void
lwkt_yield_quick(void)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
		splz();
	if (lwkt_resched_wanted()) {
		crit_enter();
		if (TAILQ_FIRST(&gd->gd_tdrunq) == td) {
			clear_lwkt_resched();
		} else {
			atomic_set_int(&td->td_mpflags, TDF_MP_DIDYIELD);
			lwkt_schedule_self(curthread);
			lwkt_switch();
		}
		crit_exit();
	}
}

/*
 * This yield is designed for kernel threads with a user context.
 *
 * The kernel acting on behalf of the user is potentially cpu-bound, so
 * this function efficiently allows other threads to run and can also
 * switch to other processes by releasing the current process designation.
 *
 * The lwkt_user_yield() function is designed to have very low overhead
 * if no yield is determined to be needed.
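 *
 * Typical usage is a cpu-bound kernel loop working on behalf of a user
 * process (a sketch; work_remaining() and do_work() are hypothetical):
 *
 *	while (work_remaining()) {
 *		do_work();
 *		lwkt_user_yield();
 *	}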
 */
void
lwkt_user_yield(void)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	/*
	 * This should never be called with spinlocks held, but there is a
	 * path via ACPI where it might happen.
	 */
	if (gd->gd_spinlocks)
		return;

	/*
	 * Always run any pending interrupts in case we are in a critical
	 * section.
	 */
	if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
		splz();

	/*
	 * Switch (which forces a release) if another kernel thread needs
	 * the cpu, if userland wants us to resched, or if our kernel
	 * quantum has run out.
	 */
	if (lwkt_resched_wanted() ||
	    user_resched_wanted())
	{
		lwkt_switch();
	}

#if 0
	/*
	 * Reacquire the current process if we are released.
	 *
	 * XXX not implemented atm.  The kernel may be holding locks and
	 * such, so we want the thread to continue to receive cpu.
	 */
	if (td->td_release == NULL && lp) {
		lp->lwp_proc->p_usched->acquire_curproc(lp);
		td->td_release = lwkt_passive_release;
		lwkt_setpri_self(TDPRI_USER_NORM);
	}
#endif
}

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * We have a little helper inline function which does additional work after
 * the thread has been enqueued, including dealing with preemption and
 * setting need_lwkt_resched() (which prevents the kernel from returning
 * to userland until it has processed higher priority threads).
 *
 * It is possible for this routine to be called after a failed _enqueue
 * (due to the target thread migrating, sleeping, or otherwise blocked).
 * We have to check that the thread is actually on the run queue!
 */
static __inline
void
_lwkt_schedule_post(globaldata_t gd, thread_t ntd, int ccount)
{
	if (ntd->td_flags & TDF_RUNQ) {
		if (ntd->td_preemptable) {
			ntd->td_preemptable(ntd, ccount);	/* YYY +token */
		}
	}
}

static __inline
void
_lwkt_schedule(thread_t td)
{
	globaldata_t mygd = mycpu;

	KASSERT(td != &td->td_gd->gd_idlethread,
		("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
	KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
	crit_enter_gd(mygd);
	KKASSERT(td->td_lwp == NULL ||
		 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);

	if (td == mygd->gd_curthread) {
		_lwkt_enqueue(td);
	} else {
		/*
		 * If we own the thread, there is no race (since we are in a
		 * critical section).  If we do not own the thread there
		 * might be a race but the target cpu will deal with it.
		 */
		if (td->td_gd == mygd) {
			_lwkt_enqueue(td);
			_lwkt_schedule_post(mygd, td, 1);
		} else {
			lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote,
					td, 0);
		}
	}
	crit_exit_gd(mygd);
}

void
lwkt_schedule(thread_t td)
{
	_lwkt_schedule(td);
}

void
lwkt_schedule_noresched(thread_t td)	/* XXX not impl */
{
	_lwkt_schedule(td);
}

/*
 * When scheduled remotely, if frame != NULL the IPIQ is being run via
 * doreti or an interrupt and preemption can be allowed.
 *
 * To allow preemption we have to drop the critical section so only
 * one is present in _lwkt_schedule_post.
 */
static void
lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
{
	thread_t td = curthread;
	thread_t ntd = arg;

	if (frame && ntd->td_preemptable) {
		crit_exit_noyield(td);
		_lwkt_schedule(ntd);
		crit_enter_quick(td);
	} else {
		_lwkt_schedule(ntd);
	}
}

/*
 * Thread migration using a 'Pull' method.  The thread may or may not be
 * the current thread.  It MUST be descheduled and in a stable state.
 * lwkt_giveaway() must be called on the cpu owning the thread.
 *
 * At any point after lwkt_giveaway() is called, the target cpu may
 * 'pull' the thread by calling lwkt_acquire().
 *
 * We have to make sure the thread is not sitting on a per-cpu tsleep
 * queue or it will blow up when it moves to another cpu.
 *
 * MPSAFE - must be called under very specific conditions.
 */
void
lwkt_giveaway(thread_t td)
{
	globaldata_t gd = mycpu;

	crit_enter_gd(gd);
	if (td->td_flags & TDF_TSLEEPQ)
		tsleep_remove(td);
	KKASSERT(td->td_gd == gd);
	TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
	td->td_flags |= TDF_MIGRATING;
	crit_exit_gd(gd);
}

void
lwkt_acquire(thread_t td)
{
	globaldata_t gd;
	globaldata_t mygd;

	KKASSERT(td->td_flags & TDF_MIGRATING);
	gd = td->td_gd;
	mygd = mycpu;
	if (gd != mycpu) {
#ifdef LOOPMASK
		uint64_t tsc_base = rdtsc();
#endif
		cpu_lfence();
		KKASSERT((td->td_flags & TDF_RUNQ) == 0);
		crit_enter_gd(mygd);
		DEBUG_PUSH_INFO("lwkt_acquire");
		while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
			lwkt_process_ipiq();
			cpu_lfence();
#ifdef _KERNEL_VIRTUAL
			vkernel_yield();
#endif
#ifdef LOOPMASK
			if (tsc_frequency &&
			    rdtsc() - tsc_base > tsc_frequency) {
				kprintf("lwkt_acquire: stuck td %p "
					"td->td_flags %08x\n",
					td, td->td_flags);
				tsc_base = rdtsc();
			}
#endif
		}
		DEBUG_POP_INFO();
		cpu_mfence();
		td->td_gd = mygd;
		TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
		td->td_flags &= ~TDF_MIGRATING;
		crit_exit_gd(mygd);
	} else {
		crit_enter_gd(mygd);
		TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
		td->td_flags &= ~TDF_MIGRATING;
		crit_exit_gd(mygd);
	}
}

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
	crit_enter();
	if (td == curthread) {
		_lwkt_dequeue(td);
	} else {
		if (td->td_gd == mycpu) {
			_lwkt_dequeue(td);
		} else {
			lwkt_send_ipiq(td->td_gd,
				       (ipifunc1_t)lwkt_deschedule, td);
		}
	}
	crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread, LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
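 *
 * For example (illustrative), a caller that raises another thread's
 * priority and wants the change to take effect immediately would yield
 * explicitly afterwards:
 *
 *	lwkt_setpri(td, TDPRI_KERN_DAEMON);
 *	lwkt_yield();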
 */
void
lwkt_setpri(thread_t td, int pri)
{
	if (td->td_pri != pri) {
		KKASSERT(pri >= 0);
		crit_enter();
		if (td->td_flags & TDF_RUNQ) {
			KKASSERT(td->td_gd == mycpu);
			_lwkt_dequeue(td);
			td->td_pri = pri;
			_lwkt_enqueue(td);
		} else {
			td->td_pri = pri;
		}
		crit_exit();
	}
}

/*
 * Set the initial priority for a thread prior to it being scheduled for
 * the first time.  The thread MUST NOT be scheduled before or during
 * this call.  The thread may be assigned to a cpu other than the current
 * cpu.
 *
 * Typically used after a thread has been created with TDF_STOPPREQ,
 * and before the thread is initially scheduled.
 */
void
lwkt_setpri_initial(thread_t td, int pri)
{
	KKASSERT(pri >= 0);
	KKASSERT((td->td_flags & TDF_RUNQ) == 0);
	td->td_pri = pri;
}

void
lwkt_setpri_self(int pri)
{
	thread_t td = curthread;

	KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
	crit_enter();
	if (td->td_flags & TDF_RUNQ) {
		_lwkt_dequeue(td);
		td->td_pri = pri;
		_lwkt_enqueue(td);
	} else {
		td->td_pri = pri;
	}
	crit_exit();
}

/*
 * hz tick scheduler clock for LWKT threads
 */
void
lwkt_schedulerclock(thread_t td)
{
	globaldata_t gd = td->td_gd;
	thread_t xtd;

	xtd = TAILQ_FIRST(&gd->gd_tdrunq);
	if (xtd == td) {
		/*
		 * If the current thread is at the head of the runq shift it
		 * to the end of any equal-priority threads and request a
		 * LWKT reschedule if it moved.
		 *
		 * Ignore upri in this situation.  There will only be one
		 * user thread in user mode, all others will be user threads
		 * running in kernel mode and we have to make sure they get
		 * some cpu.
		 */
		xtd = TAILQ_NEXT(td, td_threadq);
		if (xtd && xtd->td_pri == td->td_pri) {
			TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
			while (xtd && xtd->td_pri == td->td_pri)
				xtd = TAILQ_NEXT(xtd, td_threadq);
			if (xtd)
				TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
			else
				TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td,
						  td_threadq);
			need_lwkt_resched();
		}
	} else if (xtd) {
		/*
		 * If we scheduled a thread other than the one at the head
		 * of the queue always request a reschedule every tick.
		 */
		need_lwkt_resched();
	}
	/* else curthread probably the idle thread, no need to reschedule */
}

/*
 * Migrate the current thread to the specified cpu.
 *
 * This is accomplished by descheduling ourselves from the current cpu
 * and setting td_migrate_gd.  The lwkt_switch() code will detect that the
 * 'old' thread wants to migrate after it has been completely switched out
 * and will complete the migration.
 *
 * TDF_MIGRATING prevents scheduling races while the thread is being migrated.
 *
 * We must be sure to release our current process designation (if a user
 * process) before clearing out any tsleepq we are on because the release
 * code may re-add us.
 *
 * We must be sure to remove ourselves from the current cpu's tsleepq
 * before potentially moving to another queue.  The thread can be on
 * a tsleepq due to a left-over tsleep_interlock().
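 *
 * The lwkt_migratecpu() wrapper below looks up the target cpu's
 * globaldata and calls this function, so migrating the current thread
 * to cpu 0 is simply (illustrative):
 *
 *	lwkt_migratecpu(0);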
 */

void
lwkt_setcpu_self(globaldata_t rgd)
{
	thread_t td = curthread;

	if (td->td_gd != rgd) {
		crit_enter_quick(td);

		if (td->td_release)
			td->td_release(td);
		if (td->td_flags & TDF_TSLEEPQ)
			tsleep_remove(td);

		/*
		 * Set TDF_MIGRATING to prevent a spurious reschedule while
		 * we are trying to deschedule ourselves and switch away,
		 * then deschedule ourself, remove us from tdallq, and set
		 * td_migrate_gd.  Finally, call lwkt_switch() to complete
		 * the operation.
		 */
		td->td_flags |= TDF_MIGRATING;
		lwkt_deschedule_self(td);
		TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
		td->td_migrate_gd = rgd;
		lwkt_switch();

		/*
		 * We are now on the target cpu
		 */
		KKASSERT(rgd == mycpu);
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
		crit_exit_quick(td);
	}
}

void
lwkt_migratecpu(int cpuid)
{
	globaldata_t rgd;

	rgd = globaldata_find(cpuid);
	lwkt_setcpu_self(rgd);
}

/*
 * Remote IPI for cpu migration (called while in a critical section so we
 * do not have to enter another one).
 *
 * The thread (td) has already been completely descheduled from the
 * originating cpu and we can simply assert the case.  The thread is
 * assigned to the new cpu and enqueued.
 *
 * The thread will re-add itself to tdallq when it resumes execution.
 */
static void
lwkt_setcpu_remote(void *arg)
{
	thread_t td = arg;
	globaldata_t gd = mycpu;

	KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
	td->td_gd = gd;
	cpu_mfence();
	td->td_flags &= ~TDF_MIGRATING;
	KKASSERT(td->td_migrate_gd == NULL);
	KKASSERT(td->td_lwp == NULL ||
		 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
	_lwkt_enqueue(td);
}

struct lwp *
lwkt_preempted_proc(void)
{
	thread_t td = curthread;

	while (td->td_preempted)
		td = td->td_preempted;
	return(td->td_lwp);
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * If the cpu is not specified one will be selected.  In the future
 * specifying a cpu of -1 will enable kernel thread migration between
 * cpus.
 */
int
lwkt_create(void (*func)(void *), void *arg, struct thread **tdp,
	    thread_t template, int tdflags, int cpu, const char *fmt, ...)
{
	thread_t td;
	__va_list ap;

	td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
			       tdflags);
	if (tdp)
		*tdp = td;
	cpu_set_thread_handler(td, lwkt_exit, func, arg);

	/*
	 * Set up arg0 for 'ps' etc
	 */
	__va_start(ap, fmt);
	kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
	__va_end(ap);

	/*
	 * Schedule the thread to run
	 */
	if (td->td_flags & TDF_NOSTART)
		td->td_flags &= ~TDF_NOSTART;
	else
		lwkt_schedule(td);
	return 0;
}

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
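 *
 * Kernel threads created with lwkt_create() do not normally call this
 * directly: cpu_set_thread_handler() installs lwkt_exit as the address
 * the thread function returns to, so falling off the end of the
 * function terminates the thread.  A minimal sketch (kt_worker is a
 * hypothetical name):
 *
 *	static void
 *	kt_worker(void *arg)
 *	{
 *		... do work ...
 *	}
 *
 *	lwkt_create(kt_worker, NULL, NULL, NULL, 0, -1, "ktworker");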
 */
void
lwkt_exit(void)
{
	thread_t td = curthread;
	thread_t std;
	globaldata_t gd;

	/*
	 * Do any cleanup that might block here
	 */
	biosched_done(td);
	dsched_exit_thread(td);

	/*
	 * Get us into a critical section to interlock gd_freetd and loop
	 * until we can get it freed.
	 *
	 * We have to cache the current td in gd_freetd because
	 * objcache_put()ing it would rip it out from under us while our
	 * thread is still active.
	 *
	 * We are the current thread so of course our own TDF_RUNNING bit
	 * will be set, so unlike the lwp reap code we don't wait for it
	 * to clear.
	 */
	gd = mycpu;
	crit_enter_quick(td);
	for (;;) {
		if (td->td_refs) {
			tsleep(td, 0, "tdreap", 1);
			continue;
		}
		if ((std = gd->gd_freetd) != NULL) {
			KKASSERT((std->td_flags & (TDF_RUNNING|
						   TDF_PREEMPT_LOCK)) == 0);
			gd->gd_freetd = NULL;
			objcache_put(thread_cache, std);
			continue;
		}
		break;
	}

	/*
	 * Remove thread resources from kernel lists and deschedule us for
	 * the last time.  We cannot block after this point or we may end
	 * up with a stale td on the tsleepq.
	 *
	 * None of this may block, the critical section is the only thing
	 * protecting tdallq and the only thing preventing new lwkt_hold()
	 * thread refs now.
	 */
	if (td->td_flags & TDF_TSLEEPQ)
		tsleep_remove(td);
	lwkt_deschedule_self(td);
	lwkt_remove_tdallq(td);
	KKASSERT(td->td_refs == 0);

	/*
	 * Final cleanup
	 */
	KKASSERT(gd->gd_freetd == NULL);
	if (td->td_flags & TDF_ALLOCATED_THREAD)
		gd->gd_freetd = td;
	cpu_thread_exit();
}

void
lwkt_remove_tdallq(thread_t td)
{
	KKASSERT(td->td_gd == mycpu);
	TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
}

/*
 * Code reduction and branch prediction improvements.  Call/return
 * overhead on modern cpus often degenerates into 0 cycles due to
 * the cpu's branch prediction hardware and return pc cache.  We
 * can take advantage of this by not inlining medium-complexity
 * functions and we can also reduce the branch prediction impact
 * by collapsing perfectly predictable branches into a single
 * procedure instead of duplicating it.
 *
 * Is any of this noticeable?  Probably not, so I'll take the
 * smaller code size.
 */
void
crit_exit_wrapper(__DEBUG_CRIT_ARG__)
{
	_crit_exit(mycpu __DEBUG_CRIT_PASS_ARG__);
}

void
crit_panic(void)
{
	thread_t td = curthread;
	int lcrit = td->td_critcount;

	td->td_critcount = 0;
	cpu_ccfence();
	panic("td_critcount is/would-go negative! %p %d", td, lcrit);
	/* NOT REACHED */
}

/*
 * Called from debugger/panic on cpus which have been stopped.  We must still
 * process the IPIQ while stopped.
 *
 * If we are dumping also try to process any pending interrupts.  This may
 * or may not work depending on the state of the cpu at the point it was
 * stopped.
 */
void
lwkt_smp_stopped(void)
{
	globaldata_t gd = mycpu;

	if (dumping) {
		lwkt_process_ipiq();
		--gd->gd_intr_nesting_level;
		splz();
		++gd->gd_intr_nesting_level;
	} else {
		lwkt_process_ipiq();
	}
	cpu_smp_stopped();
}