1 /* 2 * Copyright (c) 2003-2011 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 /* 36 * Each cpu in a system has its own self-contained light weight kernel 37 * thread scheduler, which means that generally speaking we only need 38 * to use a critical section to avoid problems. Foreign thread 39 * scheduling is queued via (async) IPIs. 
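 *
 * As an illustration (a condensed form of the logic in _lwkt_schedule()
 * further down in this file, not new behavior): scheduling a thread owned
 * by another cpu never touches that cpu's run queue directly, the request
 * is forwarded as an asynchronous IPI instead:
 *
 *	if (td->td_gd == mygd)
 *		_lwkt_enqueue(td);
 *	else
 *		lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);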
40 */ 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/kernel.h> 45 #include <sys/proc.h> 46 #include <sys/rtprio.h> 47 #include <sys/kinfo.h> 48 #include <sys/malloc.h> 49 #include <sys/queue.h> 50 #include <sys/sysctl.h> 51 #include <sys/kthread.h> 52 #include <machine/cpu.h> 53 #include <sys/lock.h> 54 #include <sys/spinlock.h> 55 #include <sys/ktr.h> 56 #include <sys/indefinite.h> 57 58 #include <sys/thread2.h> 59 #include <sys/spinlock2.h> 60 #include <sys/indefinite2.h> 61 62 #include <sys/dsched.h> 63 64 #include <vm/vm.h> 65 #include <vm/vm_param.h> 66 #include <vm/vm_kern.h> 67 #include <vm/vm_object.h> 68 #include <vm/vm_page.h> 69 #include <vm/vm_map.h> 70 #include <vm/vm_pager.h> 71 #include <vm/vm_extern.h> 72 73 #include <machine/stdarg.h> 74 #include <machine/smp.h> 75 #include <machine/clock.h> 76 77 #define LOOPMASK 78 79 #if !defined(KTR_CTXSW) 80 #define KTR_CTXSW KTR_ALL 81 #endif 82 KTR_INFO_MASTER(ctxsw); 83 KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p", int cpu, struct thread *td); 84 KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p", int cpu, struct thread *td); 85 KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s", struct thread *td, char *comm); 86 KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = <dead>", struct thread *td); 87 88 static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads"); 89 90 #ifdef INVARIANTS 91 static int panic_on_cscount = 0; 92 #endif 93 #ifdef DEBUG_LWKT_THREAD 94 static int64_t switch_count = 0; 95 static int64_t preempt_hit = 0; 96 static int64_t preempt_miss = 0; 97 static int64_t preempt_weird = 0; 98 #endif 99 static int lwkt_use_spin_port; 100 __read_mostly static struct objcache *thread_cache; 101 int cpu_mwait_spin = 0; 102 103 static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame); 104 static void lwkt_setcpu_remote(void *arg); 105 106 /* 107 * We can make all thread ports use the spin backend instead of the thread 108 * backend. This should only be set to debug the spin backend. 
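 *
 * Illustrative use (an assumption about the standard tunable mechanism,
 * not something this file documents): being a TUNABLE_INT, the knob would
 * normally be set at boot time from /boot/loader.conf, e.g.:
 *
 *	lwkt.use_spin_port="1"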
109 */ 110 TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port); 111 112 #ifdef INVARIANTS 113 SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, 114 "Panic if attempting to switch lwkt's while mastering cpusync"); 115 #endif 116 #ifdef DEBUG_LWKT_THREAD 117 SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, 118 "Number of switched threads"); 119 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, 120 "Successful preemption events"); 121 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, 122 "Failed preemption events"); 123 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, 124 "Number of preempted threads."); 125 #endif 126 extern int lwkt_sched_debug; 127 int lwkt_sched_debug = 0; 128 SYSCTL_INT(_lwkt, OID_AUTO, sched_debug, CTLFLAG_RW, 129 &lwkt_sched_debug, 0, "Scheduler debug"); 130 __read_mostly static u_int lwkt_spin_loops = 10; 131 SYSCTL_UINT(_lwkt, OID_AUTO, spin_loops, CTLFLAG_RW, 132 &lwkt_spin_loops, 0, "Scheduler spin loops until sorted decon"); 133 __read_mostly static int preempt_enable = 1; 134 SYSCTL_INT(_lwkt, OID_AUTO, preempt_enable, CTLFLAG_RW, 135 &preempt_enable, 0, "Enable preemption"); 136 static int lwkt_cache_threads = 0; 137 SYSCTL_INT(_lwkt, OID_AUTO, cache_threads, CTLFLAG_RD, 138 &lwkt_cache_threads, 0, "thread+kstack cache"); 139 140 /* 141 * These helper procedures handle the runq, they can only be called from 142 * within a critical section. 143 * 144 * WARNING! Prior to SMP being brought up it is possible to enqueue and 145 * dequeue threads belonging to other cpus, so be sure to use td->td_gd 146 * instead of 'mycpu' when referencing the globaldata structure. Once 147 * SMP live enqueuing and dequeueing only occurs on the current cpu. 148 */ 149 static __inline 150 void 151 _lwkt_dequeue(thread_t td) 152 { 153 if (td->td_flags & TDF_RUNQ) { 154 struct globaldata *gd = td->td_gd; 155 156 td->td_flags &= ~TDF_RUNQ; 157 TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq); 158 --gd->gd_tdrunqcount; 159 if (TAILQ_FIRST(&gd->gd_tdrunq) == NULL) 160 atomic_clear_int(&gd->gd_reqflags, RQF_RUNNING); 161 } 162 } 163 164 /* 165 * Priority enqueue. 166 * 167 * There are a limited number of lwkt threads runnable since user 168 * processes only schedule one at a time per cpu. However, there can 169 * be many user processes in kernel mode exiting from a tsleep() which 170 * become runnable. 171 * 172 * We scan the queue in both directions to help deal with degenerate 173 * situations when hundreds or thousands (or more) threads are runnable. 174 * 175 * NOTE: lwkt_schedulerclock() will force a round-robin based on td_pri and 176 * will ignore user priority. This is to ensure that user threads in 177 * kernel mode get cpu at some point regardless of what the user 178 * scheduler thinks. 179 */ 180 static __inline 181 void 182 _lwkt_enqueue(thread_t td) 183 { 184 thread_t xtd; /* forward scan */ 185 thread_t rtd; /* reverse scan */ 186 187 if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) { 188 struct globaldata *gd = td->td_gd; 189 190 td->td_flags |= TDF_RUNQ; 191 xtd = TAILQ_FIRST(&gd->gd_tdrunq); 192 if (xtd == NULL) { 193 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq); 194 atomic_set_int(&gd->gd_reqflags, RQF_RUNNING); 195 } else { 196 /* 197 * NOTE: td_upri - higher numbers more desireable, same sense 198 * as td_pri (typically reversed from lwp_upri). 
199 * 200 * In the equal priority case we want the best selection 201 * at the beginning so the less desireable selections know 202 * that they have to setrunqueue/go-to-another-cpu, even 203 * though it means switching back to the 'best' selection. 204 * This also avoids degenerate situations when many threads 205 * are runnable or waking up at the same time. 206 * 207 * If upri matches exactly place at end/round-robin. 208 */ 209 rtd = TAILQ_LAST(&gd->gd_tdrunq, lwkt_queue); 210 211 while (xtd && 212 (xtd->td_pri > td->td_pri || 213 (xtd->td_pri == td->td_pri && 214 xtd->td_upri >= td->td_upri))) { 215 xtd = TAILQ_NEXT(xtd, td_threadq); 216 217 /* 218 * Doing a reverse scan at the same time is an optimization 219 * for the insert-closer-to-tail case that avoids having to 220 * scan the entire list. This situation can occur when 221 * thousands of threads are woken up at the same time. 222 */ 223 if (rtd->td_pri > td->td_pri || 224 (rtd->td_pri == td->td_pri && 225 rtd->td_upri >= td->td_upri)) { 226 TAILQ_INSERT_AFTER(&gd->gd_tdrunq, rtd, td, td_threadq); 227 goto skip; 228 } 229 rtd = TAILQ_PREV(rtd, lwkt_queue, td_threadq); 230 } 231 if (xtd) 232 TAILQ_INSERT_BEFORE(xtd, td, td_threadq); 233 else 234 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq); 235 } 236 skip: 237 ++gd->gd_tdrunqcount; 238 239 /* 240 * Request a LWKT reschedule if we are now at the head of the queue. 241 */ 242 if (TAILQ_FIRST(&gd->gd_tdrunq) == td) 243 need_lwkt_resched(); 244 } 245 } 246 247 static boolean_t 248 _lwkt_thread_ctor(void *obj, void *privdata, int ocflags) 249 { 250 struct thread *td = (struct thread *)obj; 251 252 td->td_kstack = NULL; 253 td->td_kstack_size = 0; 254 td->td_flags = TDF_ALLOCATED_THREAD; 255 td->td_mpflags = 0; 256 return (1); 257 } 258 259 static void 260 _lwkt_thread_dtor(void *obj, void *privdata) 261 { 262 struct thread *td = (struct thread *)obj; 263 264 KASSERT(td->td_flags & TDF_ALLOCATED_THREAD, 265 ("_lwkt_thread_dtor: not allocated from objcache")); 266 KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack && 267 td->td_kstack_size > 0, 268 ("_lwkt_thread_dtor: corrupted stack")); 269 kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size); 270 td->td_kstack = NULL; 271 td->td_flags = 0; 272 } 273 274 /* 275 * Initialize the lwkt s/system. 276 * 277 * Nominally cache up to 32 thread + kstack structures. Cache more on 278 * systems with a lot of cpu cores. 279 */ 280 static void 281 lwkt_init(void) 282 { 283 TUNABLE_INT("lwkt.cache_threads", &lwkt_cache_threads); 284 if (lwkt_cache_threads == 0) { 285 lwkt_cache_threads = ncpus * 4; 286 if (lwkt_cache_threads < 32) 287 lwkt_cache_threads = 32; 288 } 289 thread_cache = objcache_create_mbacked( 290 M_THREAD, sizeof(struct thread), 291 0, lwkt_cache_threads, 292 _lwkt_thread_ctor, _lwkt_thread_dtor, NULL); 293 } 294 SYSINIT(lwkt_init, SI_BOOT2_LWKT_INIT, SI_ORDER_FIRST, lwkt_init, NULL); 295 296 /* 297 * Schedule a thread to run. As the current thread we can always safely 298 * schedule ourselves, and a shortcut procedure is provided for that 299 * function. 
300 * 301 * (non-blocking, self contained on a per cpu basis) 302 */ 303 void 304 lwkt_schedule_self(thread_t td) 305 { 306 KKASSERT((td->td_flags & TDF_MIGRATING) == 0); 307 crit_enter_quick(td); 308 KASSERT(td != &td->td_gd->gd_idlethread, 309 ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!")); 310 KKASSERT(td->td_lwp == NULL || 311 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); 312 _lwkt_enqueue(td); 313 crit_exit_quick(td); 314 } 315 316 /* 317 * Deschedule a thread. 318 * 319 * (non-blocking, self contained on a per cpu basis) 320 */ 321 void 322 lwkt_deschedule_self(thread_t td) 323 { 324 crit_enter_quick(td); 325 _lwkt_dequeue(td); 326 crit_exit_quick(td); 327 } 328 329 /* 330 * LWKTs operate on a per-cpu basis 331 * 332 * WARNING! Called from early boot, 'mycpu' may not work yet. 333 */ 334 void 335 lwkt_gdinit(struct globaldata *gd) 336 { 337 TAILQ_INIT(&gd->gd_tdrunq); 338 TAILQ_INIT(&gd->gd_tdallq); 339 lockinit(&gd->gd_sysctllock, "sysctl", 0, LK_CANRECURSE); 340 } 341 342 /* 343 * Create a new thread. The thread must be associated with a process context 344 * or LWKT start address before it can be scheduled. If the target cpu is 345 * -1 the thread will be created on the current cpu. 346 * 347 * If you intend to create a thread without a process context this function 348 * does everything except load the startup and switcher function. 349 */ 350 thread_t 351 lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags) 352 { 353 static int cpu_rotator; 354 globaldata_t gd = mycpu; 355 void *stack; 356 357 /* 358 * If static thread storage is not supplied allocate a thread. Reuse 359 * a cached free thread if possible. gd_freetd is used to keep an exiting 360 * thread intact through the exit. 361 */ 362 if (td == NULL) { 363 crit_enter_gd(gd); 364 if ((td = gd->gd_freetd) != NULL) { 365 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK| 366 TDF_RUNQ)) == 0); 367 gd->gd_freetd = NULL; 368 } else { 369 td = objcache_get(thread_cache, M_WAITOK); 370 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK| 371 TDF_RUNQ)) == 0); 372 } 373 crit_exit_gd(gd); 374 KASSERT((td->td_flags & 375 (TDF_ALLOCATED_THREAD|TDF_RUNNING|TDF_PREEMPT_LOCK)) == 376 TDF_ALLOCATED_THREAD, 377 ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags)); 378 flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK); 379 } 380 381 /* 382 * Try to reuse cached stack. 383 */ 384 if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) { 385 if (flags & TDF_ALLOCATED_STACK) { 386 kmem_free(kernel_map, (vm_offset_t)stack, td->td_kstack_size); 387 stack = NULL; 388 } 389 } 390 if (stack == NULL) { 391 if (cpu < 0) { 392 stack = (void *)kmem_alloc_stack(kernel_map, stksize, 0); 393 } else { 394 stack = (void *)kmem_alloc_stack(kernel_map, stksize, 395 KM_CPU(cpu)); 396 } 397 flags |= TDF_ALLOCATED_STACK; 398 } 399 if (cpu < 0) { 400 cpu = ++cpu_rotator; 401 cpu_ccfence(); 402 cpu = (uint32_t)cpu % (uint32_t)ncpus; 403 } 404 lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu)); 405 return(td); 406 } 407 408 /* 409 * Initialize a preexisting thread structure. This function is used by 410 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread. 411 * 412 * All threads start out in a critical section at a priority of 413 * TDPRI_KERN_DAEMON. Higher level code will modify the priority as 414 * appropriate. 
This function may send an IPI message when the 415 * requested cpu is not the current cpu and consequently gd_tdallq may 416 * not be initialized synchronously from the point of view of the originating 417 * cpu. 418 * 419 * NOTE! we have to be careful in regards to creating threads for other cpus 420 * if SMP has not yet been activated. 421 */ 422 static void 423 lwkt_init_thread_remote(void *arg) 424 { 425 thread_t td = arg; 426 427 /* 428 * Protected by critical section held by IPI dispatch 429 */ 430 TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq); 431 } 432 433 /* 434 * lwkt core thread structural initialization. 435 * 436 * NOTE: All threads are initialized as mpsafe threads. 437 */ 438 void 439 lwkt_init_thread(thread_t td, void *stack, int stksize, int flags, 440 struct globaldata *gd) 441 { 442 globaldata_t mygd = mycpu; 443 444 bzero(td, sizeof(struct thread)); 445 td->td_kstack = stack; 446 td->td_kstack_size = stksize; 447 td->td_flags = flags; 448 td->td_mpflags = 0; 449 td->td_type = TD_TYPE_GENERIC; 450 td->td_gd = gd; 451 td->td_pri = TDPRI_KERN_DAEMON; 452 td->td_critcount = 1; 453 td->td_toks_have = NULL; 454 td->td_toks_stop = &td->td_toks_base; 455 if (lwkt_use_spin_port || (flags & TDF_FORCE_SPINPORT)) { 456 lwkt_initport_spin(&td->td_msgport, td, 457 (flags & TDF_FIXEDCPU) ? TRUE : FALSE); 458 } else { 459 lwkt_initport_thread(&td->td_msgport, td); 460 } 461 pmap_init_thread(td); 462 /* 463 * Normally initializing a thread for a remote cpu requires sending an 464 * IPI. However, the idlethread is setup before the other cpus are 465 * activated so we have to treat it as a special case. XXX manipulation 466 * of gd_tdallq requires the BGL. 467 */ 468 if (gd == mygd || td == &gd->gd_idlethread) { 469 crit_enter_gd(mygd); 470 TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq); 471 crit_exit_gd(mygd); 472 } else { 473 lwkt_send_ipiq(gd, lwkt_init_thread_remote, td); 474 } 475 dsched_enter_thread(td); 476 } 477 478 void 479 lwkt_set_comm(thread_t td, const char *ctl, ...) 480 { 481 __va_list va; 482 483 __va_start(va, ctl); 484 kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va); 485 __va_end(va); 486 KTR_LOG(ctxsw_newtd, td, td->td_comm); 487 } 488 489 /* 490 * Prevent the thread from getting destroyed. Note that unlike PHOLD/PRELE 491 * this does not prevent the thread from migrating to another cpu so the 492 * gd_tdallq state is not protected by this. 493 */ 494 void 495 lwkt_hold(thread_t td) 496 { 497 atomic_add_int(&td->td_refs, 1); 498 } 499 500 void 501 lwkt_rele(thread_t td) 502 { 503 KKASSERT(td->td_refs > 0); 504 atomic_add_int(&td->td_refs, -1); 505 } 506 507 void 508 lwkt_free_thread(thread_t td) 509 { 510 KKASSERT(td->td_refs == 0); 511 KKASSERT((td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK | 512 TDF_RUNQ | TDF_TSLEEPQ)) == 0); 513 if (td->td_flags & TDF_ALLOCATED_THREAD) { 514 objcache_put(thread_cache, td); 515 } else if (td->td_flags & TDF_ALLOCATED_STACK) { 516 /* client-allocated struct with internally allocated stack */ 517 KASSERT(td->td_kstack && td->td_kstack_size > 0, 518 ("lwkt_free_thread: corrupted stack")); 519 kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size); 520 td->td_kstack = NULL; 521 td->td_kstack_size = 0; 522 } 523 524 KTR_LOG(ctxsw_deadtd, td); 525 } 526 527 528 /* 529 * Switch to the next runnable lwkt. If no LWKTs are runnable then 530 * switch to the idlethread. Switching must occur within a critical 531 * section to avoid races with the scheduling queue. 
532 * 533 * We always have full control over our cpu's run queue. Other cpus 534 * that wish to manipulate our queue must use the cpu_*msg() calls to 535 * talk to our cpu, so a critical section is all that is needed and 536 * the result is very, very fast thread switching. 537 * 538 * The LWKT scheduler uses a fixed priority model and round-robins at 539 * each priority level. User process scheduling is a totally 540 * different beast and LWKT priorities should not be confused with 541 * user process priorities. 542 * 543 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt(). lwkt_switch() 544 * is not called by the current thread in the preemption case, only when 545 * the preempting thread blocks (in order to return to the original thread). 546 * 547 * SPECIAL NOTE ON SWITCH ATOMICY: Certain operations such as thread 548 * migration and tsleep deschedule the current lwkt thread and call 549 * lwkt_switch(). In particular, the target cpu of the migration fully 550 * expects the thread to become non-runnable and can deadlock against 551 * cpusync operations if we run any IPIs prior to switching the thread out. 552 * 553 * WE MUST BE VERY CAREFUL NOT TO RUN SPLZ DIRECTLY OR INDIRECTLY IF 554 * THE CURRENT THREAD HAS BEEN DESCHEDULED! 555 */ 556 void 557 lwkt_switch(void) 558 { 559 globaldata_t gd = mycpu; 560 thread_t td = gd->gd_curthread; 561 thread_t ntd; 562 thread_t xtd; 563 int upri; 564 #ifdef LOOPMASK 565 uint64_t tsc_base = rdtsc(); 566 #endif 567 568 KKASSERT(gd->gd_processing_ipiq == 0); 569 KKASSERT(td->td_flags & TDF_RUNNING); 570 571 /* 572 * Switching from within a 'fast' (non thread switched) interrupt or IPI 573 * is illegal. However, we may have to do it anyway if we hit a fatal 574 * kernel trap or we have paniced. 575 * 576 * If this case occurs save and restore the interrupt nesting level. 577 */ 578 if (gd->gd_intr_nesting_level) { 579 int savegdnest; 580 int savegdtrap; 581 582 if (gd->gd_trap_nesting_level == 0 && panic_cpu_gd != mycpu) { 583 panic("lwkt_switch: Attempt to switch from a " 584 "fast interrupt, ipi, or hard code section, " 585 "td %p\n", 586 td); 587 } else { 588 savegdnest = gd->gd_intr_nesting_level; 589 savegdtrap = gd->gd_trap_nesting_level; 590 gd->gd_intr_nesting_level = 0; 591 gd->gd_trap_nesting_level = 0; 592 if ((td->td_flags & TDF_PANICWARN) == 0) { 593 td->td_flags |= TDF_PANICWARN; 594 kprintf("Warning: thread switch from interrupt, IPI, " 595 "or hard code section.\n" 596 "thread %p (%s)\n", td, td->td_comm); 597 print_backtrace(-1); 598 } 599 lwkt_switch(); 600 gd->gd_intr_nesting_level = savegdnest; 601 gd->gd_trap_nesting_level = savegdtrap; 602 return; 603 } 604 } 605 606 /* 607 * Release our current user process designation if we are blocking 608 * or if a user reschedule was requested. 609 * 610 * NOTE: This function is NOT called if we are switching into or 611 * returning from a preemption. 612 * 613 * NOTE: Releasing our current user process designation may cause 614 * it to be assigned to another thread, which in turn will 615 * cause us to block in the usched acquire code when we attempt 616 * to return to userland. 617 * 618 * NOTE: On SMP systems this can be very nasty when heavy token 619 * contention is present so we want to be careful not to 620 * release the designation gratuitously. 621 */ 622 if (td->td_release && 623 (user_resched_wanted() || (td->td_flags & TDF_RUNQ) == 0)) { 624 td->td_release(td); 625 } 626 627 /* 628 * Release all tokens. 
Once we do this we must remain in the critical 629 * section and cannot run IPIs or other interrupts until we switch away 630 * because they may implode if they try to get a token using our thread 631 * context. 632 */ 633 crit_enter_gd(gd); 634 if (TD_TOKS_HELD(td)) 635 lwkt_relalltokens(td); 636 637 /* 638 * We had better not be holding any spin locks, but don't get into an 639 * endless panic loop. 640 */ 641 KASSERT(gd->gd_spinlocks == 0 || panicstr != NULL, 642 ("lwkt_switch: still holding %d exclusive spinlocks!", 643 gd->gd_spinlocks)); 644 645 #ifdef INVARIANTS 646 if (td->td_cscount) { 647 kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n", 648 td); 649 if (panic_on_cscount) 650 panic("switching while mastering cpusync"); 651 } 652 #endif 653 654 /* 655 * If we had preempted another thread on this cpu, resume the preempted 656 * thread. This occurs transparently, whether the preempted thread 657 * was scheduled or not (it may have been preempted after descheduling 658 * itself). 659 * 660 * We have to setup the MP lock for the original thread after backing 661 * out the adjustment that was made to curthread when the original 662 * was preempted. 663 */ 664 if ((ntd = td->td_preempted) != NULL) { 665 KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK); 666 ntd->td_flags |= TDF_PREEMPT_DONE; 667 ntd->td_contended = 0; /* reset contended */ 668 669 /* 670 * The interrupt may have woken a thread up, we need to properly 671 * set the reschedule flag if the originally interrupted thread is 672 * at a lower priority. 673 * 674 * NOTE: The interrupt may not have descheduled ntd. 675 * 676 * NOTE: We do not reschedule if there are no threads on the runq. 677 * (ntd could be the idlethread). 678 */ 679 xtd = TAILQ_FIRST(&gd->gd_tdrunq); 680 if (xtd && xtd != ntd) 681 need_lwkt_resched(); 682 goto havethread_preempted; 683 } 684 685 /* 686 * Figure out switch target. If we cannot switch to our desired target 687 * look for a thread that we can switch to. 688 * 689 * NOTE! The limited spin loop and related parameters are extremely 690 * important for system performance, particularly for pipes and 691 * concurrent conflicting VM faults. 692 */ 693 clear_lwkt_resched(); 694 ntd = TAILQ_FIRST(&gd->gd_tdrunq); 695 696 if (ntd) { 697 do { 698 if (TD_TOKS_NOT_HELD(ntd) || 699 lwkt_getalltokens(ntd, (ntd->td_contended > lwkt_spin_loops))) 700 { 701 goto havethread; 702 } 703 ++ntd->td_contended; /* overflow ok */ 704 if (gd->gd_indefinite.type == 0) 705 indefinite_init(&gd->gd_indefinite, NULL, 0, 't'); 706 #ifdef LOOPMASK 707 if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) { 708 kprintf("lwkt_switch: WARNING, excessive token contention " 709 "cpu %d, %d sec, " 710 "td %p (%s)\n", 711 gd->gd_cpuid, 712 ntd->td_contended, 713 ntd, 714 ntd->td_comm); 715 tsc_base = rdtsc(); 716 } 717 #endif 718 } while (ntd->td_contended < (lwkt_spin_loops >> 1)); 719 upri = ntd->td_upri; 720 721 /* 722 * Bleh, the thread we wanted to switch to has a contended token. 723 * See if we can switch to another thread. 724 * 725 * We generally don't want to do this because it represents a 726 * priority inversion, but contending tokens on the same cpu can 727 * cause real problems if we don't now that we have an exclusive 728 * priority mechanism over shared for tokens. 729 * 730 * The solution is to allow threads with pending tokens to compete 731 * for them (a lower priority thread will get less cpu once it 732 * returns from the kernel anyway). 
If a thread does not have 733 * any contending tokens, we go by td_pri and upri. 734 */ 735 while ((ntd = TAILQ_NEXT(ntd, td_threadq)) != NULL) { 736 if (TD_TOKS_NOT_HELD(ntd) && 737 ntd->td_pri < TDPRI_KERN_LPSCHED && upri > ntd->td_upri) { 738 continue; 739 } 740 if (upri < ntd->td_upri) 741 upri = ntd->td_upri; 742 743 /* 744 * Try this one. 745 */ 746 if (TD_TOKS_NOT_HELD(ntd) || 747 lwkt_getalltokens(ntd, (ntd->td_contended > lwkt_spin_loops))) { 748 goto havethread; 749 } 750 ++ntd->td_contended; /* overflow ok */ 751 } 752 753 /* 754 * Fall through, switch to idle thread to get us out of the current 755 * context. Since we were contended, prevent HLT by flagging a 756 * LWKT reschedule. 757 */ 758 need_lwkt_resched(); 759 } 760 761 /* 762 * We either contended on ntd or the runq is empty. We must switch 763 * through the idle thread to get out of the current context. 764 */ 765 ntd = &gd->gd_idlethread; 766 if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) 767 ASSERT_NO_TOKENS_HELD(ntd); 768 cpu_time.cp_msg[0] = 0; 769 goto haveidle; 770 771 havethread: 772 /* 773 * Clear gd_idle_repeat when doing a normal switch to a non-idle 774 * thread. 775 */ 776 ntd->td_wmesg = NULL; 777 ntd->td_contended = 0; /* reset once scheduled */ 778 ++gd->gd_cnt.v_swtch; 779 gd->gd_idle_repeat = 0; 780 781 /* 782 * If we were busy waiting record final disposition 783 */ 784 if (gd->gd_indefinite.type) 785 indefinite_done(&gd->gd_indefinite); 786 787 havethread_preempted: 788 /* 789 * If the new target does not need the MP lock and we are holding it, 790 * release the MP lock. If the new target requires the MP lock we have 791 * already acquired it for the target. 792 */ 793 ; 794 haveidle: 795 KASSERT(ntd->td_critcount, 796 ("priority problem in lwkt_switch %d %d", 797 td->td_critcount, ntd->td_critcount)); 798 799 if (td != ntd) { 800 /* 801 * Execute the actual thread switch operation. This function 802 * returns to the current thread and returns the previous thread 803 * (which may be different from the thread we switched to). 804 * 805 * We are responsible for marking ntd as TDF_RUNNING. 806 */ 807 KKASSERT((ntd->td_flags & TDF_RUNNING) == 0); 808 #ifdef DEBUG_LWKT_THREAD 809 ++switch_count; 810 #endif 811 KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd); 812 ntd->td_flags |= TDF_RUNNING; 813 lwkt_switch_return(td->td_switch(ntd)); 814 /* ntd invalid, td_switch() can return a different thread_t */ 815 } 816 817 /* 818 * catch-all. XXX is this strictly needed? 819 */ 820 splz_check(); 821 822 /* NOTE: current cpu may have changed after switch */ 823 crit_exit_quick(td); 824 } 825 826 /* 827 * Called by assembly in the td_switch (thread restore path) for thread 828 * bootstrap cases which do not 'return' to lwkt_switch(). 829 */ 830 void 831 lwkt_switch_return(thread_t otd) 832 { 833 globaldata_t rgd; 834 #ifdef LOOPMASK 835 uint64_t tsc_base = rdtsc(); 836 #endif 837 int exiting; 838 839 exiting = otd->td_flags & TDF_EXITING; 840 cpu_ccfence(); 841 842 /* 843 * Check if otd was migrating. Now that we are on ntd we can finish 844 * up the migration. This is a bit messy but it is the only place 845 * where td is known to be fully descheduled. 846 * 847 * We can only activate the migration if otd was migrating but not 848 * held on the cpu due to a preemption chain. We still have to 849 * clear TDF_RUNNING on the old thread either way. 850 * 851 * We are responsible for clearing the previously running thread's 852 * TDF_RUNNING. 
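	 *
	 * Handshake summary (a condensed description of the code below plus
	 * lwkt_setcpu_self() and lwkt_setcpu_remote(), nothing new):
	 *
	 *	origin cpu:  thread sets td_migrate_gd, deschedules itself
	 *		     and calls lwkt_switch()
	 *	origin cpu:  lwkt_switch_return() clears TDF_RUNNING and
	 *		     sends the lwkt_setcpu_remote IPI to the target
	 *	target cpu:  lwkt_setcpu_remote() re-points td_gd, clears
	 *		     TDF_MIGRATING and enqueues the thread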
	 */
	if ((rgd = otd->td_migrate_gd) != NULL &&
	    (otd->td_flags & TDF_PREEMPT_LOCK) == 0) {
		KKASSERT((otd->td_flags & (TDF_MIGRATING | TDF_RUNNING)) ==
			 (TDF_MIGRATING | TDF_RUNNING));
		otd->td_migrate_gd = NULL;
		otd->td_flags &= ~TDF_RUNNING;
		lwkt_send_ipiq(rgd, lwkt_setcpu_remote, otd);
	} else {
		otd->td_flags &= ~TDF_RUNNING;
	}

	/*
	 * Final exit validations (see lwp_wait()). Note that otd becomes
	 * invalid the *instant* we set TDF_MP_EXITSIG.
	 *
	 * Use the EXITING status loaded from before we clear TDF_RUNNING,
	 * because if it is not set otd becomes invalid the instant we clear
	 * TDF_RUNNING on it (otherwise, if the system is fast enough, we
	 * might 'steal' TDF_EXITING from another switch-return!).
	 */
	while (exiting) {
		u_int mpflags;

		mpflags = otd->td_mpflags;
		cpu_ccfence();

		if (mpflags & TDF_MP_EXITWAIT) {
			if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
					      mpflags | TDF_MP_EXITSIG)) {
				wakeup(otd);
				break;
			}
		} else {
			if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
					      mpflags | TDF_MP_EXITSIG)) {
				wakeup(otd);
				break;
			}
		}

#ifdef LOOPMASK
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			kprintf("lwkt_switch_return: excessive TDF_EXITING "
				"thread %p\n", otd);
			tsc_base = rdtsc();
		}
#endif
	}
}

/*
 * Request that the target thread preempt the current thread. Preemption
 * can only occur if:
 *
 * - Our critical section is the one that we were called with
 * - The relative priority of the target thread is higher
 * - The target is not excessively interrupt-nested via td_nest_count
 * - The target thread holds no tokens.
 * - The target thread is not already scheduled and belongs to the
 *   current cpu.
 * - The current thread is not holding any spin-locks.
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION. Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critcount is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * Preemption is typically limited to interrupt threads.
 *
 * Operation works in a fairly straightforward manner. The normal
 * scheduling code is bypassed and we switch directly to the target
 * thread. When the target thread attempts to block or switch away
 * code at the base of lwkt_switch() will switch directly back to our
 * thread. Our thread is able to retain whatever tokens it holds and
 * if the target needs one of them the target will switch back to us
 * and reschedule itself normally.
 */
void
lwkt_preempt(thread_t ntd, int critcount)
{
	struct globaldata *gd = mycpu;
	thread_t xtd;
	thread_t td;
	int save_gd_intr_nesting_level;

	/*
	 * The caller has put us in a critical section. We can only preempt
	 * if the caller of the caller was not in a critical section (basically
	 * a local interrupt), as determined by the 'critcount' parameter. We
	 * also can't preempt if the caller is holding any spinlocks (even if
	 * he isn't in a critical section). This also handles the tokens test.
	 *
	 * YYY The target thread must be in a critical section (else it must
	 * inherit our critical section? I dunno yet).
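	 *
	 * Illustrative path (a sketch pieced together from
	 * lwkt_set_interrupt_support_thread() and _lwkt_schedule_post() in
	 * this file; co_td is a hypothetical thread_t): a driver co-thread
	 * opts in once, after which scheduling it from this cpu may preempt
	 * the scheduler's caller immediately:
	 *
	 *	lwkt_set_interrupt_support_thread();  (co-thread: sets
	 *					       td_preemptable)
	 *	...
	 *	lwkt_schedule(co_td);		      (producer: may preempt)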
949 */ 950 KASSERT(ntd->td_critcount, ("BADCRIT0 %d", ntd->td_pri)); 951 952 td = gd->gd_curthread; 953 if (preempt_enable == 0) { 954 #ifdef DEBUG_LWKT_THREAD 955 ++preempt_miss; 956 #endif 957 return; 958 } 959 if (ntd->td_pri <= td->td_pri) { 960 #ifdef DEBUG_LWKT_THREAD 961 ++preempt_miss; 962 #endif 963 return; 964 } 965 if (td->td_critcount > critcount) { 966 #ifdef DEBUG_LWKT_THREAD 967 ++preempt_miss; 968 #endif 969 return; 970 } 971 if (td->td_nest_count >= 2) { 972 #ifdef DEBUG_LWKT_THREAD 973 ++preempt_miss; 974 #endif 975 return; 976 } 977 if (td->td_cscount) { 978 #ifdef DEBUG_LWKT_THREAD 979 ++preempt_miss; 980 #endif 981 return; 982 } 983 if (ntd->td_gd != gd) { 984 #ifdef DEBUG_LWKT_THREAD 985 ++preempt_miss; 986 #endif 987 return; 988 } 989 990 /* 991 * We don't have to check spinlocks here as they will also bump 992 * td_critcount. 993 * 994 * Do not try to preempt if the target thread is holding any tokens. 995 * We could try to acquire the tokens but this case is so rare there 996 * is no need to support it. 997 */ 998 KKASSERT(gd->gd_spinlocks == 0); 999 1000 if (TD_TOKS_HELD(ntd)) { 1001 #ifdef DEBUG_LWKT_THREAD 1002 ++preempt_miss; 1003 #endif 1004 return; 1005 } 1006 if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) { 1007 #ifdef DEBUG_LWKT_THREAD 1008 ++preempt_weird; 1009 #endif 1010 return; 1011 } 1012 if (ntd->td_preempted) { 1013 #ifdef DEBUG_LWKT_THREAD 1014 ++preempt_hit; 1015 #endif 1016 return; 1017 } 1018 KKASSERT(gd->gd_processing_ipiq == 0); 1019 1020 /* 1021 * Since we are able to preempt the current thread, there is no need to 1022 * call need_lwkt_resched(). 1023 * 1024 * We must temporarily clear gd_intr_nesting_level around the switch 1025 * since switchouts from the target thread are allowed (they will just 1026 * return to our thread), and since the target thread has its own stack. 1027 * 1028 * A preemption must switch back to the original thread, assert the 1029 * case. 1030 */ 1031 #ifdef DEBUG_LWKT_THREAD 1032 ++preempt_hit; 1033 #endif 1034 ntd->td_preempted = td; 1035 td->td_flags |= TDF_PREEMPT_LOCK; 1036 KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd); 1037 save_gd_intr_nesting_level = gd->gd_intr_nesting_level; 1038 gd->gd_intr_nesting_level = 0; 1039 1040 KKASSERT((ntd->td_flags & TDF_RUNNING) == 0); 1041 ntd->td_flags |= TDF_RUNNING; 1042 xtd = td->td_switch(ntd); 1043 KKASSERT(xtd == ntd); 1044 lwkt_switch_return(xtd); 1045 gd->gd_intr_nesting_level = save_gd_intr_nesting_level; 1046 1047 KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE)); 1048 ntd->td_preempted = NULL; 1049 td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE); 1050 } 1051 1052 /* 1053 * Conditionally call splz() if gd_reqflags indicates work is pending. 1054 * This will work inside a critical section but not inside a hard code 1055 * section. 1056 * 1057 * (self contained on a per cpu basis) 1058 */ 1059 void 1060 splz_check(void) 1061 { 1062 globaldata_t gd = mycpu; 1063 thread_t td = gd->gd_curthread; 1064 1065 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && 1066 gd->gd_intr_nesting_level == 0 && 1067 td->td_nest_count < 2) 1068 { 1069 splz(); 1070 } 1071 } 1072 1073 /* 1074 * This version is integrated into crit_exit, reqflags has already 1075 * been tested but td_critcount has not. 1076 * 1077 * We only want to execute the splz() on the 1->0 transition of 1078 * critcount and not in a hard code section or if too deeply nested. 1079 * 1080 * NOTE: gd->gd_spinlocks is implied to be 0 when td_critcount is 0. 
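 *
 * Hedged caller sketch (inferred from the description above, not copied
 * from the actual crit_exit() inline): the exit path drops td_critcount,
 * tests gd_reqflags and only then falls into this routine, roughly:
 *
 *	--td->td_critcount;
 *	if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
 *		lwkt_maybe_splz(td);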
1081 */ 1082 void 1083 lwkt_maybe_splz(thread_t td) 1084 { 1085 globaldata_t gd = td->td_gd; 1086 1087 if (td->td_critcount == 0 && 1088 gd->gd_intr_nesting_level == 0 && 1089 td->td_nest_count < 2) 1090 { 1091 splz(); 1092 } 1093 } 1094 1095 /* 1096 * Drivers which set up processing co-threads can call this function to 1097 * run the co-thread at a higher priority and to allow it to preempt 1098 * normal threads. 1099 */ 1100 void 1101 lwkt_set_interrupt_support_thread(void) 1102 { 1103 thread_t td = curthread; 1104 1105 lwkt_setpri_self(TDPRI_INT_SUPPORT); 1106 td->td_flags |= TDF_INTTHREAD; 1107 td->td_preemptable = lwkt_preempt; 1108 } 1109 1110 1111 /* 1112 * This function is used to negotiate a passive release of the current 1113 * process/lwp designation with the user scheduler, allowing the user 1114 * scheduler to schedule another user thread. The related kernel thread 1115 * (curthread) continues running in the released state. 1116 */ 1117 void 1118 lwkt_passive_release(struct thread *td) 1119 { 1120 struct lwp *lp = td->td_lwp; 1121 1122 td->td_release = NULL; 1123 lwkt_setpri_self(TDPRI_KERN_USER); 1124 1125 lp->lwp_proc->p_usched->release_curproc(lp); 1126 } 1127 1128 1129 /* 1130 * This implements a LWKT yield, allowing a kernel thread to yield to other 1131 * kernel threads at the same or higher priority. This function can be 1132 * called in a tight loop and will typically only yield once per tick. 1133 * 1134 * Most kernel threads run at the same priority in order to allow equal 1135 * sharing. 1136 * 1137 * (self contained on a per cpu basis) 1138 */ 1139 void 1140 lwkt_yield(void) 1141 { 1142 globaldata_t gd = mycpu; 1143 thread_t td = gd->gd_curthread; 1144 1145 /* 1146 * Should never be called with spinlocks held but there is a path 1147 * via ACPI where it might happen. 1148 */ 1149 if (gd->gd_spinlocks) 1150 return; 1151 1152 /* 1153 * Safe to call splz if we are not too-heavily nested. 1154 */ 1155 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2) 1156 splz(); 1157 1158 /* 1159 * Caller allows switching 1160 */ 1161 if (lwkt_resched_wanted()) { 1162 atomic_set_int(&td->td_mpflags, TDF_MP_DIDYIELD); 1163 lwkt_schedule_self(td); 1164 lwkt_switch(); 1165 } 1166 } 1167 1168 /* 1169 * The quick version processes pending interrupts and higher-priority 1170 * LWKT threads but will not round-robin same-priority LWKT threads. 1171 * 1172 * When called while attempting to return to userland the only same-pri 1173 * threads are the ones which have already tried to become the current 1174 * user process. 1175 */ 1176 void 1177 lwkt_yield_quick(void) 1178 { 1179 globaldata_t gd = mycpu; 1180 thread_t td = gd->gd_curthread; 1181 1182 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2) 1183 splz(); 1184 if (lwkt_resched_wanted()) { 1185 crit_enter(); 1186 if (TAILQ_FIRST(&gd->gd_tdrunq) == td) { 1187 clear_lwkt_resched(); 1188 } else { 1189 atomic_set_int(&td->td_mpflags, TDF_MP_DIDYIELD); 1190 lwkt_schedule_self(curthread); 1191 lwkt_switch(); 1192 } 1193 crit_exit(); 1194 } 1195 } 1196 1197 /* 1198 * This yield is designed for kernel threads with a user context. 1199 * 1200 * The kernel acting on behalf of the user is potentially cpu-bound, 1201 * this function will efficiently allow other threads to run and also 1202 * switch to other processes by releasing. 1203 * 1204 * The lwkt_user_yield() function is designed to have very low overhead 1205 * if no yield is determined to be needed. 
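 *
 * Illustrative use (an assumption; process_chunk() is a hypothetical
 * helper): a cpu-bound kernel loop working on behalf of a user process
 * calls this once per iteration, which costs almost nothing when no
 * reschedule is pending:
 *
 *	while (resid > 0) {
 *		resid -= process_chunk(...);
 *		lwkt_user_yield();
 *	}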
1206 */ 1207 void 1208 lwkt_user_yield(void) 1209 { 1210 globaldata_t gd = mycpu; 1211 thread_t td = gd->gd_curthread; 1212 1213 /* 1214 * Should never be called with spinlocks held but there is a path 1215 * via ACPI where it might happen. 1216 */ 1217 if (gd->gd_spinlocks) 1218 return; 1219 1220 /* 1221 * Always run any pending interrupts in case we are in a critical 1222 * section. 1223 */ 1224 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2) 1225 splz(); 1226 1227 /* 1228 * Switch (which forces a release) if another kernel thread needs 1229 * the cpu, if userland wants us to resched, or if our kernel 1230 * quantum has run out. 1231 */ 1232 if (lwkt_resched_wanted() || 1233 user_resched_wanted()) 1234 { 1235 lwkt_switch(); 1236 } 1237 1238 #if 0 1239 /* 1240 * Reacquire the current process if we are released. 1241 * 1242 * XXX not implemented atm. The kernel may be holding locks and such, 1243 * so we want the thread to continue to receive cpu. 1244 */ 1245 if (td->td_release == NULL && lp) { 1246 lp->lwp_proc->p_usched->acquire_curproc(lp); 1247 td->td_release = lwkt_passive_release; 1248 lwkt_setpri_self(TDPRI_USER_NORM); 1249 } 1250 #endif 1251 } 1252 1253 /* 1254 * Generic schedule. Possibly schedule threads belonging to other cpus and 1255 * deal with threads that might be blocked on a wait queue. 1256 * 1257 * We have a little helper inline function which does additional work after 1258 * the thread has been enqueued, including dealing with preemption and 1259 * setting need_lwkt_resched() (which prevents the kernel from returning 1260 * to userland until it has processed higher priority threads). 1261 * 1262 * It is possible for this routine to be called after a failed _enqueue 1263 * (due to the target thread migrating, sleeping, or otherwise blocked). 1264 * We have to check that the thread is actually on the run queue! 1265 */ 1266 static __inline 1267 void 1268 _lwkt_schedule_post(globaldata_t gd, thread_t ntd, int ccount) 1269 { 1270 if (ntd->td_flags & TDF_RUNQ) { 1271 if (ntd->td_preemptable) { 1272 ntd->td_preemptable(ntd, ccount); /* YYY +token */ 1273 } 1274 } 1275 } 1276 1277 static __inline 1278 void 1279 _lwkt_schedule(thread_t td) 1280 { 1281 globaldata_t mygd = mycpu; 1282 1283 KASSERT(td != &td->td_gd->gd_idlethread, 1284 ("lwkt_schedule(): scheduling gd_idlethread is illegal!")); 1285 KKASSERT((td->td_flags & TDF_MIGRATING) == 0); 1286 crit_enter_gd(mygd); 1287 KKASSERT(td->td_lwp == NULL || 1288 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); 1289 1290 if (td == mygd->gd_curthread) { 1291 _lwkt_enqueue(td); 1292 } else { 1293 /* 1294 * If we own the thread, there is no race (since we are in a 1295 * critical section). If we do not own the thread there might 1296 * be a race but the target cpu will deal with it. 1297 */ 1298 if (td->td_gd == mygd) { 1299 _lwkt_enqueue(td); 1300 _lwkt_schedule_post(mygd, td, 1); 1301 } else { 1302 lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0); 1303 } 1304 } 1305 crit_exit_gd(mygd); 1306 } 1307 1308 void 1309 lwkt_schedule(thread_t td) 1310 { 1311 _lwkt_schedule(td); 1312 } 1313 1314 void 1315 lwkt_schedule_noresched(thread_t td) /* XXX not impl */ 1316 { 1317 _lwkt_schedule(td); 1318 } 1319 1320 /* 1321 * When scheduled remotely if frame != NULL the IPIQ is being 1322 * run via doreti or an interrupt then preemption can be allowed. 1323 * 1324 * To allow preemption we have to drop the critical section so only 1325 * one is present in _lwkt_schedule_post. 
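 *
 * In other words: with a non-NULL frame the td_critcount seen by
 * lwkt_preempt() is just the one taken by _lwkt_schedule() itself, which
 * is exactly the level its critcount argument tells it to ignore, so the
 * newly scheduled thread may run immediately.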
1326 */ 1327 static void 1328 lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame) 1329 { 1330 thread_t td = curthread; 1331 thread_t ntd = arg; 1332 1333 if (frame && ntd->td_preemptable) { 1334 crit_exit_noyield(td); 1335 _lwkt_schedule(ntd); 1336 crit_enter_quick(td); 1337 } else { 1338 _lwkt_schedule(ntd); 1339 } 1340 } 1341 1342 /* 1343 * Thread migration using a 'Pull' method. The thread may or may not be 1344 * the current thread. It MUST be descheduled and in a stable state. 1345 * lwkt_giveaway() must be called on the cpu owning the thread. 1346 * 1347 * At any point after lwkt_giveaway() is called, the target cpu may 1348 * 'pull' the thread by calling lwkt_acquire(). 1349 * 1350 * We have to make sure the thread is not sitting on a per-cpu tsleep 1351 * queue or it will blow up when it moves to another cpu. 1352 * 1353 * MPSAFE - must be called under very specific conditions. 1354 */ 1355 void 1356 lwkt_giveaway(thread_t td) 1357 { 1358 globaldata_t gd = mycpu; 1359 1360 crit_enter_gd(gd); 1361 if (td->td_flags & TDF_TSLEEPQ) 1362 tsleep_remove(td); 1363 KKASSERT(td->td_gd == gd); 1364 TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq); 1365 td->td_flags |= TDF_MIGRATING; 1366 crit_exit_gd(gd); 1367 } 1368 1369 void 1370 lwkt_acquire(thread_t td) 1371 { 1372 globaldata_t gd; 1373 globaldata_t mygd; 1374 1375 KKASSERT(td->td_flags & TDF_MIGRATING); 1376 gd = td->td_gd; 1377 mygd = mycpu; 1378 if (gd != mycpu) { 1379 #ifdef LOOPMASK 1380 uint64_t tsc_base = rdtsc(); 1381 #endif 1382 cpu_lfence(); 1383 KKASSERT((td->td_flags & TDF_RUNQ) == 0); 1384 crit_enter_gd(mygd); 1385 DEBUG_PUSH_INFO("lwkt_acquire"); 1386 while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) { 1387 lwkt_process_ipiq(); 1388 cpu_lfence(); 1389 #ifdef _KERNEL_VIRTUAL 1390 vkernel_yield(); 1391 #endif 1392 #ifdef LOOPMASK 1393 if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) { 1394 kprintf("lwkt_acquire: stuck td %p td->td_flags %08x\n", 1395 td, td->td_flags); 1396 tsc_base = rdtsc(); 1397 } 1398 #endif 1399 } 1400 DEBUG_POP_INFO(); 1401 cpu_mfence(); 1402 td->td_gd = mygd; 1403 TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq); 1404 td->td_flags &= ~TDF_MIGRATING; 1405 crit_exit_gd(mygd); 1406 } else { 1407 crit_enter_gd(mygd); 1408 TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq); 1409 td->td_flags &= ~TDF_MIGRATING; 1410 crit_exit_gd(mygd); 1411 } 1412 } 1413 1414 /* 1415 * Generic deschedule. Descheduling threads other then your own should be 1416 * done only in carefully controlled circumstances. Descheduling is 1417 * asynchronous. 1418 * 1419 * This function may block if the cpu has run out of messages. 1420 */ 1421 void 1422 lwkt_deschedule(thread_t td) 1423 { 1424 crit_enter(); 1425 if (td == curthread) { 1426 _lwkt_dequeue(td); 1427 } else { 1428 if (td->td_gd == mycpu) { 1429 _lwkt_dequeue(td); 1430 } else { 1431 lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td); 1432 } 1433 } 1434 crit_exit(); 1435 } 1436 1437 /* 1438 * Set the target thread's priority. This routine does not automatically 1439 * switch to a higher priority thread, LWKT threads are not designed for 1440 * continuous priority changes. Yield if you want to switch. 
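 *
 * Illustrative use (an assumption; helper_td is a hypothetical thread_t):
 *
 *	lwkt_setpri(helper_td, TDPRI_KERN_DAEMON);
 *	lwkt_yield();		(let the change take effect on a switch)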
1441 */ 1442 void 1443 lwkt_setpri(thread_t td, int pri) 1444 { 1445 if (td->td_pri != pri) { 1446 KKASSERT(pri >= 0); 1447 crit_enter(); 1448 if (td->td_flags & TDF_RUNQ) { 1449 KKASSERT(td->td_gd == mycpu); 1450 _lwkt_dequeue(td); 1451 td->td_pri = pri; 1452 _lwkt_enqueue(td); 1453 } else { 1454 td->td_pri = pri; 1455 } 1456 crit_exit(); 1457 } 1458 } 1459 1460 /* 1461 * Set the initial priority for a thread prior to it being scheduled for 1462 * the first time. The thread MUST NOT be scheduled before or during 1463 * this call. The thread may be assigned to a cpu other then the current 1464 * cpu. 1465 * 1466 * Typically used after a thread has been created with TDF_STOPPREQ, 1467 * and before the thread is initially scheduled. 1468 */ 1469 void 1470 lwkt_setpri_initial(thread_t td, int pri) 1471 { 1472 KKASSERT(pri >= 0); 1473 KKASSERT((td->td_flags & TDF_RUNQ) == 0); 1474 td->td_pri = pri; 1475 } 1476 1477 void 1478 lwkt_setpri_self(int pri) 1479 { 1480 thread_t td = curthread; 1481 1482 KKASSERT(pri >= 0 && pri <= TDPRI_MAX); 1483 crit_enter(); 1484 if (td->td_flags & TDF_RUNQ) { 1485 _lwkt_dequeue(td); 1486 td->td_pri = pri; 1487 _lwkt_enqueue(td); 1488 } else { 1489 td->td_pri = pri; 1490 } 1491 crit_exit(); 1492 } 1493 1494 /* 1495 * hz tick scheduler clock for LWKT threads 1496 */ 1497 void 1498 lwkt_schedulerclock(thread_t td) 1499 { 1500 globaldata_t gd = td->td_gd; 1501 thread_t xtd; 1502 1503 xtd = TAILQ_FIRST(&gd->gd_tdrunq); 1504 if (xtd == td) { 1505 /* 1506 * If the current thread is at the head of the runq shift it to the 1507 * end of any equal-priority threads and request a LWKT reschedule 1508 * if it moved. 1509 * 1510 * Ignore upri in this situation. There will only be one user thread 1511 * in user mode, all others will be user threads running in kernel 1512 * mode and we have to make sure they get some cpu. 1513 */ 1514 xtd = TAILQ_NEXT(td, td_threadq); 1515 if (xtd && xtd->td_pri == td->td_pri) { 1516 TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq); 1517 while (xtd && xtd->td_pri == td->td_pri) 1518 xtd = TAILQ_NEXT(xtd, td_threadq); 1519 if (xtd) 1520 TAILQ_INSERT_BEFORE(xtd, td, td_threadq); 1521 else 1522 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq); 1523 need_lwkt_resched(); 1524 } 1525 } else if (xtd) { 1526 /* 1527 * If we scheduled a thread other than the one at the head of the 1528 * queue always request a reschedule every tick. 1529 */ 1530 need_lwkt_resched(); 1531 } 1532 /* else curthread probably the idle thread, no need to reschedule */ 1533 } 1534 1535 /* 1536 * Migrate the current thread to the specified cpu. 1537 * 1538 * This is accomplished by descheduling ourselves from the current cpu 1539 * and setting td_migrate_gd. The lwkt_switch() code will detect that the 1540 * 'old' thread wants to migrate after it has been completely switched out 1541 * and will complete the migration. 1542 * 1543 * TDF_MIGRATING prevents scheduling races while the thread is being migrated. 1544 * 1545 * We must be sure to release our current process designation (if a user 1546 * process) before clearing out any tsleepq we are on because the release 1547 * code may re-add us. 1548 * 1549 * We must be sure to remove ourselves from the current cpu's tsleepq 1550 * before potentially moving to another queue. The thread can be on 1551 * a tsleepq due to a left-over tsleep_interlock(). 
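 *
 * Illustrative use (an assumption): a kernel thread can pin itself to a
 * particular cpu through the wrappers below:
 *
 *	lwkt_migratecpu(0);			(move ourselves to cpu 0)
 *	KKASSERT(mycpu->gd_cpuid == 0);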
 */

void
lwkt_setcpu_self(globaldata_t rgd)
{
	thread_t td = curthread;

	if (td->td_gd != rgd) {
		crit_enter_quick(td);

		if (td->td_release)
			td->td_release(td);
		if (td->td_flags & TDF_TSLEEPQ)
			tsleep_remove(td);

		/*
		 * Set TDF_MIGRATING to prevent a spurious reschedule while
		 * we are trying to deschedule ourselves and switch away,
		 * then deschedule ourself, remove us from tdallq, and set
		 * td_migrate_gd. Finally, call lwkt_switch() to complete
		 * the operation.
		 */
		td->td_flags |= TDF_MIGRATING;
		lwkt_deschedule_self(td);
		TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
		td->td_migrate_gd = rgd;
		lwkt_switch();

		/*
		 * We are now on the target cpu
		 */
		KKASSERT(rgd == mycpu);
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
		crit_exit_quick(td);
	}
}

void
lwkt_migratecpu(int cpuid)
{
	globaldata_t rgd;

	rgd = globaldata_find(cpuid);
	lwkt_setcpu_self(rgd);
}

/*
 * Remote IPI for cpu migration (called while in a critical section so we
 * do not have to enter another one).
 *
 * The thread (td) has already been completely descheduled from the
 * originating cpu and we can simply assert the case. The thread is
 * assigned to the new cpu and enqueued.
 *
 * The thread will re-add itself to tdallq when it resumes execution.
 */
static void
lwkt_setcpu_remote(void *arg)
{
	thread_t td = arg;
	globaldata_t gd = mycpu;

	KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
	td->td_gd = gd;
	cpu_mfence();
	td->td_flags &= ~TDF_MIGRATING;
	KKASSERT(td->td_migrate_gd == NULL);
	KKASSERT(td->td_lwp == NULL ||
		 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
	_lwkt_enqueue(td);
}

struct lwp *
lwkt_preempted_proc(void)
{
	thread_t td = curthread;
	while (td->td_preempted)
		td = td->td_preempted;
	return(td->td_lwp);
}

/*
 * Create a kernel process/thread/whatever. It shares its address space
 * with proc0 - ie: kernel only.
 *
 * If the cpu is not specified one will be selected. In the future
 * specifying a cpu of -1 will enable kernel thread migration between
 * cpus.
 */
int
lwkt_create(void (*func)(void *), void *arg, struct thread **tdp,
	    thread_t template, int tdflags, int cpu, const char *fmt, ...)
{
	thread_t td;
	__va_list ap;

	td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
			       tdflags);
	if (tdp)
		*tdp = td;
	cpu_set_thread_handler(td, lwkt_exit, func, arg);

	/*
	 * Set up arg0 for 'ps' etc
	 */
	__va_start(ap, fmt);
	kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
	__va_end(ap);

	/*
	 * Schedule the thread to run
	 */
	if (td->td_flags & TDF_NOSTART)
		td->td_flags &= ~TDF_NOSTART;
	else
		lwkt_schedule(td);
	return 0;
}

/*
 * Destroy an LWKT thread. Warning! This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
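 *
 * Illustrative lifecycle sketch (an assumption; my_kthread and
 * still_working() are hypothetical): a thread created with lwkt_create()
 * above ends up here either by calling lwkt_exit() explicitly or by
 * returning, since cpu_set_thread_handler() passes lwkt_exit alongside
 * the thread function:
 *
 *	static void
 *	my_kthread(void *arg)
 *	{
 *		while (still_working())
 *			lwkt_yield();
 *		lwkt_exit();
 *	}
 *
 *	lwkt_create(my_kthread, NULL, NULL, NULL, 0, -1, "mykthread");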
1674 */ 1675 void 1676 lwkt_exit(void) 1677 { 1678 thread_t td = curthread; 1679 thread_t std; 1680 globaldata_t gd; 1681 1682 /* 1683 * Do any cleanup that might block here 1684 */ 1685 biosched_done(td); 1686 dsched_exit_thread(td); 1687 1688 /* 1689 * Get us into a critical section to interlock gd_freetd and loop 1690 * until we can get it freed. 1691 * 1692 * We have to cache the current td in gd_freetd because objcache_put()ing 1693 * it would rip it out from under us while our thread is still active. 1694 * 1695 * We are the current thread so of course our own TDF_RUNNING bit will 1696 * be set, so unlike the lwp reap code we don't wait for it to clear. 1697 */ 1698 gd = mycpu; 1699 crit_enter_quick(td); 1700 for (;;) { 1701 if (td->td_refs) { 1702 tsleep(td, 0, "tdreap", 1); 1703 continue; 1704 } 1705 if ((std = gd->gd_freetd) != NULL) { 1706 KKASSERT((std->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0); 1707 gd->gd_freetd = NULL; 1708 objcache_put(thread_cache, std); 1709 continue; 1710 } 1711 break; 1712 } 1713 1714 /* 1715 * Remove thread resources from kernel lists and deschedule us for 1716 * the last time. We cannot block after this point or we may end 1717 * up with a stale td on the tsleepq. 1718 * 1719 * None of this may block, the critical section is the only thing 1720 * protecting tdallq and the only thing preventing new lwkt_hold() 1721 * thread refs now. 1722 */ 1723 if (td->td_flags & TDF_TSLEEPQ) 1724 tsleep_remove(td); 1725 lwkt_deschedule_self(td); 1726 lwkt_remove_tdallq(td); 1727 KKASSERT(td->td_refs == 0); 1728 1729 /* 1730 * Final cleanup 1731 */ 1732 KKASSERT(gd->gd_freetd == NULL); 1733 if (td->td_flags & TDF_ALLOCATED_THREAD) 1734 gd->gd_freetd = td; 1735 cpu_thread_exit(); 1736 } 1737 1738 void 1739 lwkt_remove_tdallq(thread_t td) 1740 { 1741 KKASSERT(td->td_gd == mycpu); 1742 TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq); 1743 } 1744 1745 /* 1746 * Code reduction and branch prediction improvements. Call/return 1747 * overhead on modern cpus often degenerates into 0 cycles due to 1748 * the cpu's branch prediction hardware and return pc cache. We 1749 * can take advantage of this by not inlining medium-complexity 1750 * functions and we can also reduce the branch prediction impact 1751 * by collapsing perfectly predictable branches into a single 1752 * procedure instead of duplicating it. 1753 * 1754 * Is any of this noticeable? Probably not, so I'll take the 1755 * smaller code size. 1756 */ 1757 void 1758 crit_exit_wrapper(__DEBUG_CRIT_ARG__) 1759 { 1760 _crit_exit(mycpu __DEBUG_CRIT_PASS_ARG__); 1761 } 1762 1763 void 1764 crit_panic(void) 1765 { 1766 thread_t td = curthread; 1767 int lcrit = td->td_critcount; 1768 1769 td->td_critcount = 0; 1770 cpu_ccfence(); 1771 panic("td_critcount is/would-go negative! %p %d", td, lcrit); 1772 /* NOT REACHED */ 1773 } 1774 1775 /* 1776 * Called from debugger/panic on cpus which have been stopped. We must still 1777 * process the IPIQ while stopped. 1778 * 1779 * If we are dumping also try to process any pending interrupts. This may 1780 * or may not work depending on the state of the cpu at the point it was 1781 * stopped. 1782 */ 1783 void 1784 lwkt_smp_stopped(void) 1785 { 1786 globaldata_t gd = mycpu; 1787 1788 if (dumping) { 1789 lwkt_process_ipiq(); 1790 --gd->gd_intr_nesting_level; 1791 splz(); 1792 ++gd->gd_intr_nesting_level; 1793 } else { 1794 lwkt_process_ipiq(); 1795 } 1796 cpu_smp_stopped(); 1797 } 1798