1 /* 2 * Copyright (c) 2003-2011 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 /* 36 * Each cpu in a system has its own self-contained light weight kernel 37 * thread scheduler, which means that generally speaking we only need 38 * to use a critical section to avoid problems. Foreign thread 39 * scheduling is queued via (async) IPIs. 
40 */ 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/kernel.h> 45 #include <sys/proc.h> 46 #include <sys/rtprio.h> 47 #include <sys/kinfo.h> 48 #include <sys/queue.h> 49 #include <sys/sysctl.h> 50 #include <sys/kthread.h> 51 #include <machine/cpu.h> 52 #include <sys/lock.h> 53 #include <sys/spinlock.h> 54 #include <sys/ktr.h> 55 56 #include <sys/thread2.h> 57 #include <sys/spinlock2.h> 58 #include <sys/mplock2.h> 59 60 #include <sys/dsched.h> 61 62 #include <vm/vm.h> 63 #include <vm/vm_param.h> 64 #include <vm/vm_kern.h> 65 #include <vm/vm_object.h> 66 #include <vm/vm_page.h> 67 #include <vm/vm_map.h> 68 #include <vm/vm_pager.h> 69 #include <vm/vm_extern.h> 70 71 #include <machine/stdarg.h> 72 #include <machine/smp.h> 73 74 #if !defined(KTR_CTXSW) 75 #define KTR_CTXSW KTR_ALL 76 #endif 77 KTR_INFO_MASTER(ctxsw); 78 KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p", int cpu, struct thread *td); 79 KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p", int cpu, struct thread *td); 80 KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s", struct thread *td, char *comm); 81 KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = <dead>", struct thread *td); 82 83 static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads"); 84 85 #ifdef INVARIANTS 86 static int panic_on_cscount = 0; 87 #endif 88 static __int64_t switch_count = 0; 89 static __int64_t preempt_hit = 0; 90 static __int64_t preempt_miss = 0; 91 static __int64_t preempt_weird = 0; 92 static int lwkt_use_spin_port; 93 static struct objcache *thread_cache; 94 95 static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame); 96 static void lwkt_setcpu_remote(void *arg); 97 98 extern void cpu_heavy_restore(void); 99 extern void cpu_lwkt_restore(void); 100 extern void cpu_kthread_restore(void); 101 extern void cpu_idle_restore(void); 102 103 /* 104 * We can make all thread ports use the spin backend instead of the thread 105 * backend. This should only be set to debug the spin backend. 
106 */ 107 TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port); 108 109 #ifdef INVARIANTS 110 SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, 111 "Panic if attempting to switch lwkt's while mastering cpusync"); 112 #endif 113 SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, 114 "Number of switched threads"); 115 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, 116 "Successful preemption events"); 117 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, 118 "Failed preemption events"); 119 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, 120 "Number of preempted threads."); 121 static int fairq_enable = 0; 122 SYSCTL_INT(_lwkt, OID_AUTO, fairq_enable, CTLFLAG_RW, 123 &fairq_enable, 0, "Turn on fairq priority accumulators"); 124 static int fairq_bypass = -1; 125 SYSCTL_INT(_lwkt, OID_AUTO, fairq_bypass, CTLFLAG_RW, 126 &fairq_bypass, 0, "Allow fairq to bypass td on token failure"); 127 extern int lwkt_sched_debug; 128 int lwkt_sched_debug = 0; 129 SYSCTL_INT(_lwkt, OID_AUTO, sched_debug, CTLFLAG_RW, 130 &lwkt_sched_debug, 0, "Scheduler debug"); 131 static int lwkt_spin_loops = 10; 132 SYSCTL_INT(_lwkt, OID_AUTO, spin_loops, CTLFLAG_RW, 133 &lwkt_spin_loops, 0, "Scheduler spin loops until sorted decon"); 134 static int lwkt_spin_reseq = 0; 135 SYSCTL_INT(_lwkt, OID_AUTO, spin_reseq, CTLFLAG_RW, 136 &lwkt_spin_reseq, 0, "Scheduler resequencer enable"); 137 static int lwkt_spin_monitor = 0; 138 SYSCTL_INT(_lwkt, OID_AUTO, spin_monitor, CTLFLAG_RW, 139 &lwkt_spin_monitor, 0, "Scheduler uses monitor/mwait"); 140 static int lwkt_spin_fatal = 0; /* disabled */ 141 SYSCTL_INT(_lwkt, OID_AUTO, spin_fatal, CTLFLAG_RW, 142 &lwkt_spin_fatal, 0, "LWKT scheduler spin loops till fatal panic"); 143 static int preempt_enable = 1; 144 SYSCTL_INT(_lwkt, OID_AUTO, preempt_enable, CTLFLAG_RW, 145 &preempt_enable, 0, "Enable preemption"); 146 static int lwkt_cache_threads = 0; 147 SYSCTL_INT(_lwkt, OID_AUTO, cache_threads, CTLFLAG_RD, 148 &lwkt_cache_threads, 0, "thread+kstack cache"); 149 150 static __cachealign int lwkt_cseq_rindex; 151 static __cachealign int lwkt_cseq_windex; 152 153 /* 154 * These helper procedures handle the runq, they can only be called from 155 * within a critical section. 156 * 157 * WARNING! Prior to SMP being brought up it is possible to enqueue and 158 * dequeue threads belonging to other cpus, so be sure to use td->td_gd 159 * instead of 'mycpu' when referencing the globaldata structure. Once 160 * SMP live enqueuing and dequeueing only occurs on the current cpu. 161 */ 162 static __inline 163 void 164 _lwkt_dequeue(thread_t td) 165 { 166 if (td->td_flags & TDF_RUNQ) { 167 struct globaldata *gd = td->td_gd; 168 169 td->td_flags &= ~TDF_RUNQ; 170 TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq); 171 --gd->gd_tdrunqcount; 172 if (TAILQ_FIRST(&gd->gd_tdrunq) == NULL) 173 atomic_clear_int(&gd->gd_reqflags, RQF_RUNNING); 174 } 175 } 176 177 /* 178 * Priority enqueue. 179 * 180 * There are a limited number of lwkt threads runnable since user 181 * processes only schedule one at a time per cpu. However, there can 182 * be many user processes in kernel mode exiting from a tsleep() which 183 * become runnable. 184 * 185 * NOTE: lwkt_schedulerclock() will force a round-robin based on td_pri and 186 * will ignore user priority. This is to ensure that user threads in 187 * kernel mode get cpu at some point regardless of what the user 188 * scheduler thinks. 
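 *
 * For example (added illustration): if the runq already holds threads at
 * td_pri 20 and td_pri 10 and a new thread at td_pri 15 is enqueued, it
 * is inserted between them; a second thread at td_pri 20 is inserted
 * after the existing td_pri 20 thread (round-robin among equal priority).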
 */
static __inline
void
_lwkt_enqueue(thread_t td)
{
    thread_t xtd;

    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
	struct globaldata *gd = td->td_gd;

	td->td_flags |= TDF_RUNQ;
	xtd = TAILQ_FIRST(&gd->gd_tdrunq);
	if (xtd == NULL) {
	    TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
	    atomic_set_int(&gd->gd_reqflags, RQF_RUNNING);
	} else {
	    /*
	     * NOTE: td_upri - higher numbers more desirable, same sense
	     *	     as td_pri (typically reversed from lwp_upri).
	     *
	     *	     In the equal priority case we want the best selection
	     *	     at the beginning so the less desirable selections know
	     *	     that they have to setrunqueue/go-to-another-cpu, even
	     *	     though it means switching back to the 'best' selection.
	     *	     This also avoids degenerate situations when many threads
	     *	     are runnable or waking up at the same time.
	     *
	     *	     If upri matches exactly place at end/round-robin.
	     */
	    while (xtd &&
		   (xtd->td_pri >= td->td_pri ||
		    (xtd->td_pri == td->td_pri &&
		     xtd->td_upri >= td->td_upri))) {
		xtd = TAILQ_NEXT(xtd, td_threadq);
	    }
	    if (xtd)
		TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
	    else
		TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
	}
	++gd->gd_tdrunqcount;

	/*
	 * Request a LWKT reschedule if we are now at the head of the queue.
	 */
	if (TAILQ_FIRST(&gd->gd_tdrunq) == td)
	    need_lwkt_resched();
    }
}

static __boolean_t
_lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
{
    struct thread *td = (struct thread *)obj;

    td->td_kstack = NULL;
    td->td_kstack_size = 0;
    td->td_flags = TDF_ALLOCATED_THREAD;
    td->td_mpflags = 0;
    return (1);
}

static void
_lwkt_thread_dtor(void *obj, void *privdata)
{
    struct thread *td = (struct thread *)obj;

    KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
	("_lwkt_thread_dtor: not allocated from objcache"));
    KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
	td->td_kstack_size > 0,
	("_lwkt_thread_dtor: corrupted stack"));
    kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
    td->td_kstack = NULL;
    td->td_flags = 0;
}

/*
 * Initialize the LWKT subsystem.
 *
 * Nominally cache up to 32 thread + kstack structures.  Cache more on
 * systems with a lot of cpu cores.
 */
void
lwkt_init(void)
{
    TUNABLE_INT("lwkt.cache_threads", &lwkt_cache_threads);
    if (lwkt_cache_threads == 0) {
	lwkt_cache_threads = ncpus * 4;
	if (lwkt_cache_threads < 32)
	    lwkt_cache_threads = 32;
    }
    thread_cache = objcache_create_mbacked(
			M_THREAD, sizeof(struct thread),
			0, lwkt_cache_threads,
			_lwkt_thread_ctor, _lwkt_thread_dtor, NULL);
}

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
    KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
    crit_enter_quick(td);
    KASSERT(td != &td->td_gd->gd_idlethread,
	("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    KKASSERT(td->td_lwp == NULL ||
	(td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
    crit_exit_quick(td);
}

/*
 * Deschedule a thread.
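 *
 * Illustrative blocking sketch (added, not original code): a thread that
 * must wait for an event typically deschedules itself and switches away,
 * resuming when another thread lwkt_schedule()s it:
 *
 *	crit_enter();
 *	lwkt_deschedule_self(curthread);
 *	... record curthread somewhere a waker can find it ...
 *	lwkt_switch();
 *	crit_exit();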
309 * 310 * (non-blocking, self contained on a per cpu basis) 311 */ 312 void 313 lwkt_deschedule_self(thread_t td) 314 { 315 crit_enter_quick(td); 316 _lwkt_dequeue(td); 317 crit_exit_quick(td); 318 } 319 320 /* 321 * LWKTs operate on a per-cpu basis 322 * 323 * WARNING! Called from early boot, 'mycpu' may not work yet. 324 */ 325 void 326 lwkt_gdinit(struct globaldata *gd) 327 { 328 TAILQ_INIT(&gd->gd_tdrunq); 329 TAILQ_INIT(&gd->gd_tdallq); 330 } 331 332 /* 333 * Create a new thread. The thread must be associated with a process context 334 * or LWKT start address before it can be scheduled. If the target cpu is 335 * -1 the thread will be created on the current cpu. 336 * 337 * If you intend to create a thread without a process context this function 338 * does everything except load the startup and switcher function. 339 */ 340 thread_t 341 lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags) 342 { 343 static int cpu_rotator; 344 globaldata_t gd = mycpu; 345 void *stack; 346 347 /* 348 * If static thread storage is not supplied allocate a thread. Reuse 349 * a cached free thread if possible. gd_freetd is used to keep an exiting 350 * thread intact through the exit. 351 */ 352 if (td == NULL) { 353 crit_enter_gd(gd); 354 if ((td = gd->gd_freetd) != NULL) { 355 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK| 356 TDF_RUNQ)) == 0); 357 gd->gd_freetd = NULL; 358 } else { 359 td = objcache_get(thread_cache, M_WAITOK); 360 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK| 361 TDF_RUNQ)) == 0); 362 } 363 crit_exit_gd(gd); 364 KASSERT((td->td_flags & 365 (TDF_ALLOCATED_THREAD|TDF_RUNNING|TDF_PREEMPT_LOCK)) == 366 TDF_ALLOCATED_THREAD, 367 ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags)); 368 flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK); 369 } 370 371 /* 372 * Try to reuse cached stack. 373 */ 374 if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) { 375 if (flags & TDF_ALLOCATED_STACK) { 376 kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size); 377 stack = NULL; 378 } 379 } 380 if (stack == NULL) { 381 stack = (void *)kmem_alloc_stack(&kernel_map, stksize); 382 flags |= TDF_ALLOCATED_STACK; 383 } 384 if (cpu < 0) { 385 cpu = ++cpu_rotator; 386 cpu_ccfence(); 387 cpu %= ncpus; 388 } 389 lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu)); 390 return(td); 391 } 392 393 /* 394 * Initialize a preexisting thread structure. This function is used by 395 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread. 396 * 397 * All threads start out in a critical section at a priority of 398 * TDPRI_KERN_DAEMON. Higher level code will modify the priority as 399 * appropriate. This function may send an IPI message when the 400 * requested cpu is not the current cpu and consequently gd_tdallq may 401 * not be initialized synchronously from the point of view of the originating 402 * cpu. 403 * 404 * NOTE! we have to be careful in regards to creating threads for other cpus 405 * if SMP has not yet been activated. 406 */ 407 static void 408 lwkt_init_thread_remote(void *arg) 409 { 410 thread_t td = arg; 411 412 /* 413 * Protected by critical section held by IPI dispatch 414 */ 415 TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq); 416 } 417 418 /* 419 * lwkt core thread structural initialization. 420 * 421 * NOTE: All threads are initialized as mpsafe threads. 
422 */ 423 void 424 lwkt_init_thread(thread_t td, void *stack, int stksize, int flags, 425 struct globaldata *gd) 426 { 427 globaldata_t mygd = mycpu; 428 429 bzero(td, sizeof(struct thread)); 430 td->td_kstack = stack; 431 td->td_kstack_size = stksize; 432 td->td_flags = flags; 433 td->td_mpflags = 0; 434 td->td_gd = gd; 435 td->td_pri = TDPRI_KERN_DAEMON; 436 td->td_critcount = 1; 437 td->td_toks_have = NULL; 438 td->td_toks_stop = &td->td_toks_base; 439 if (lwkt_use_spin_port || (flags & TDF_FORCE_SPINPORT)) 440 lwkt_initport_spin(&td->td_msgport, td); 441 else 442 lwkt_initport_thread(&td->td_msgport, td); 443 pmap_init_thread(td); 444 /* 445 * Normally initializing a thread for a remote cpu requires sending an 446 * IPI. However, the idlethread is setup before the other cpus are 447 * activated so we have to treat it as a special case. XXX manipulation 448 * of gd_tdallq requires the BGL. 449 */ 450 if (gd == mygd || td == &gd->gd_idlethread) { 451 crit_enter_gd(mygd); 452 TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq); 453 crit_exit_gd(mygd); 454 } else { 455 lwkt_send_ipiq(gd, lwkt_init_thread_remote, td); 456 } 457 dsched_new_thread(td); 458 } 459 460 void 461 lwkt_set_comm(thread_t td, const char *ctl, ...) 462 { 463 __va_list va; 464 465 __va_start(va, ctl); 466 kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va); 467 __va_end(va); 468 KTR_LOG(ctxsw_newtd, td, td->td_comm); 469 } 470 471 /* 472 * Prevent the thread from getting destroyed. Note that unlike PHOLD/PRELE 473 * this does not prevent the thread from migrating to another cpu so the 474 * gd_tdallq state is not protected by this. 475 */ 476 void 477 lwkt_hold(thread_t td) 478 { 479 atomic_add_int(&td->td_refs, 1); 480 } 481 482 void 483 lwkt_rele(thread_t td) 484 { 485 KKASSERT(td->td_refs > 0); 486 atomic_add_int(&td->td_refs, -1); 487 } 488 489 void 490 lwkt_free_thread(thread_t td) 491 { 492 KKASSERT(td->td_refs == 0); 493 KKASSERT((td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK | 494 TDF_RUNQ | TDF_TSLEEPQ)) == 0); 495 if (td->td_flags & TDF_ALLOCATED_THREAD) { 496 objcache_put(thread_cache, td); 497 } else if (td->td_flags & TDF_ALLOCATED_STACK) { 498 /* client-allocated struct with internally allocated stack */ 499 KASSERT(td->td_kstack && td->td_kstack_size > 0, 500 ("lwkt_free_thread: corrupted stack")); 501 kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size); 502 td->td_kstack = NULL; 503 td->td_kstack_size = 0; 504 } 505 KTR_LOG(ctxsw_deadtd, td); 506 } 507 508 509 /* 510 * Switch to the next runnable lwkt. If no LWKTs are runnable then 511 * switch to the idlethread. Switching must occur within a critical 512 * section to avoid races with the scheduling queue. 513 * 514 * We always have full control over our cpu's run queue. Other cpus 515 * that wish to manipulate our queue must use the cpu_*msg() calls to 516 * talk to our cpu, so a critical section is all that is needed and 517 * the result is very, very fast thread switching. 518 * 519 * The LWKT scheduler uses a fixed priority model and round-robins at 520 * each priority level. User process scheduling is a totally 521 * different beast and LWKT priorities should not be confused with 522 * user process priorities. 523 * 524 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt(). lwkt_switch() 525 * is not called by the current thread in the preemption case, only when 526 * the preempting thread blocks (in order to return to the original thread). 
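 *
 * (Added illustration: an interrupt support thread which has called
 * lwkt_set_interrupt_support_thread() has td_preemptable set to
 * lwkt_preempt, so lwkt_schedule() of that thread may preempt a
 * lower-priority kernel thread directly instead of waiting for the
 * next switch.)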
 *
 * SPECIAL NOTE ON SWITCH ATOMICITY: Certain operations such as thread
 * migration and tsleep deschedule the current lwkt thread and call
 * lwkt_switch().  In particular, the target cpu of the migration fully
 * expects the thread to become non-runnable and can deadlock against
 * cpusync operations if we run any IPIs prior to switching the thread out.
 *
 * WE MUST BE VERY CAREFUL NOT TO RUN SPLZ DIRECTLY OR INDIRECTLY IF
 * THE CURRENT THREAD HAS BEEN DESCHEDULED!
 */
void
lwkt_switch(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    thread_t ntd;
    int spinning = 0;

    KKASSERT(gd->gd_processing_ipiq == 0);
    KKASSERT(td->td_flags & TDF_RUNNING);

    /*
     * Switching from within a 'fast' (non thread switched) interrupt or IPI
     * is illegal.  However, we may have to do it anyway if we hit a fatal
     * kernel trap or we have panicked.
     *
     * If this case occurs save and restore the interrupt nesting level.
     */
    if (gd->gd_intr_nesting_level) {
	int savegdnest;
	int savegdtrap;

	if (gd->gd_trap_nesting_level == 0 && panic_cpu_gd != mycpu) {
	    panic("lwkt_switch: Attempt to switch from a "
		  "fast interrupt, ipi, or hard code section, "
		  "td %p\n",
		  td);
	} else {
	    savegdnest = gd->gd_intr_nesting_level;
	    savegdtrap = gd->gd_trap_nesting_level;
	    gd->gd_intr_nesting_level = 0;
	    gd->gd_trap_nesting_level = 0;
	    if ((td->td_flags & TDF_PANICWARN) == 0) {
		td->td_flags |= TDF_PANICWARN;
		kprintf("Warning: thread switch from interrupt, IPI, "
			"or hard code section.\n"
			"thread %p (%s)\n", td, td->td_comm);
		print_backtrace(-1);
	    }
	    lwkt_switch();
	    gd->gd_intr_nesting_level = savegdnest;
	    gd->gd_trap_nesting_level = savegdtrap;
	    return;
	}
    }

    /*
     * Release our current user process designation if we are blocking
     * or if a user reschedule was requested.
     *
     * NOTE: This function is NOT called if we are switching into or
     *	     returning from a preemption.
     *
     * NOTE: Releasing our current user process designation may cause
     *	     it to be assigned to another thread, which in turn will
     *	     cause us to block in the usched acquire code when we attempt
     *	     to return to userland.
     *
     * NOTE: On SMP systems this can be very nasty when heavy token
     *	     contention is present so we want to be careful not to
     *	     release the designation gratuitously.
     */
    if (td->td_release &&
	(user_resched_wanted() || (td->td_flags & TDF_RUNQ) == 0)) {
	    td->td_release(td);
    }

    /*
     * Release all tokens
     */
    crit_enter_gd(gd);
    if (TD_TOKS_HELD(td))
	lwkt_relalltokens(td);

    /*
     * We had better not be holding any spin locks, but don't get into an
     * endless panic loop.
     */
    KASSERT(gd->gd_spinlocks == 0 || panicstr != NULL,
	    ("lwkt_switch: still holding %d exclusive spinlocks!",
	     gd->gd_spinlocks));

#ifdef INVARIANTS
    if (td->td_cscount) {
	kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
		td);
	if (panic_on_cscount)
	    panic("switching while mastering cpusync");
    }
#endif

    /*
     * If we had preempted another thread on this cpu, resume the preempted
     * thread.  This occurs transparently, whether the preempted thread
     * was scheduled or not (it may have been preempted after descheduling
     * itself).
634 * 635 * We have to setup the MP lock for the original thread after backing 636 * out the adjustment that was made to curthread when the original 637 * was preempted. 638 */ 639 if ((ntd = td->td_preempted) != NULL) { 640 KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK); 641 ntd->td_flags |= TDF_PREEMPT_DONE; 642 643 /* 644 * The interrupt may have woken a thread up, we need to properly 645 * set the reschedule flag if the originally interrupted thread is 646 * at a lower priority. 647 * 648 * The interrupt may not have descheduled. 649 */ 650 if (TAILQ_FIRST(&gd->gd_tdrunq) != ntd) 651 need_lwkt_resched(); 652 goto havethread_preempted; 653 } 654 655 /* 656 * If we cannot obtain ownership of the tokens we cannot immediately 657 * schedule the target thread. 658 * 659 * Reminder: Again, we cannot afford to run any IPIs in this path if 660 * the current thread has been descheduled. 661 */ 662 for (;;) { 663 clear_lwkt_resched(); 664 665 /* 666 * Hotpath - pull the head of the run queue and attempt to schedule 667 * it. 668 */ 669 ntd = TAILQ_FIRST(&gd->gd_tdrunq); 670 671 if (ntd == NULL) { 672 /* 673 * Runq is empty, switch to idle to allow it to halt. 674 */ 675 ntd = &gd->gd_idlethread; 676 if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) 677 ASSERT_NO_TOKENS_HELD(ntd); 678 cpu_time.cp_msg[0] = 0; 679 cpu_time.cp_stallpc = 0; 680 goto haveidle; 681 } 682 683 /* 684 * Hotpath - schedule ntd. 685 * 686 * NOTE: For UP there is no mplock and lwkt_getalltokens() 687 * always succeeds. 688 */ 689 if (TD_TOKS_NOT_HELD(ntd) || 690 lwkt_getalltokens(ntd, (spinning >= lwkt_spin_loops))) 691 { 692 goto havethread; 693 } 694 695 /* 696 * Coldpath (SMP only since tokens always succeed on UP) 697 * 698 * We had some contention on the thread we wanted to schedule. 699 * What we do now is try to find a thread that we can schedule 700 * in its stead. 701 * 702 * The coldpath scan does NOT rearrange threads in the run list. 703 * The lwkt_schedulerclock() will assert need_lwkt_resched() on 704 * the next tick whenever the current head is not the current thread. 705 */ 706 #ifdef INVARIANTS 707 ++ntd->td_contended; 708 #endif 709 ++gd->gd_cnt.v_token_colls; 710 711 if (fairq_bypass > 0) 712 goto skip; 713 714 while ((ntd = TAILQ_NEXT(ntd, td_threadq)) != NULL) { 715 #ifndef NO_LWKT_SPLIT_USERPRI 716 /* 717 * Never schedule threads returning to userland or the 718 * user thread scheduler helper thread when higher priority 719 * threads are present. The runq is sorted by priority 720 * so we can give up traversing it when we find the first 721 * low priority thread. 722 */ 723 if (ntd->td_pri < TDPRI_KERN_LPSCHED) { 724 ntd = NULL; 725 break; 726 } 727 #endif 728 729 /* 730 * Try this one. 731 */ 732 if (TD_TOKS_NOT_HELD(ntd) || 733 lwkt_getalltokens(ntd, (spinning >= lwkt_spin_loops))) { 734 goto havethread; 735 } 736 #ifdef INVARIANTS 737 ++ntd->td_contended; 738 #endif 739 ++gd->gd_cnt.v_token_colls; 740 } 741 742 skip: 743 /* 744 * We exhausted the run list, meaning that all runnable threads 745 * are contested. 746 */ 747 cpu_pause(); 748 ntd = &gd->gd_idlethread; 749 if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) 750 ASSERT_NO_TOKENS_HELD(ntd); 751 /* contention case, do not clear contention mask */ 752 753 /* 754 * We are going to have to retry but if the current thread is not 755 * on the runq we instead switch through the idle thread to get away 756 * from the current thread. We have to flag for lwkt reschedule 757 * to prevent the idle thread from halting. 
	 *
	 * NOTE: A non-zero spinning is passed to lwkt_getalltokens() to
	 *	 instruct it to deal with the potential for deadlocks by
	 *	 ordering the tokens by address.
	 */
	if ((td->td_flags & TDF_RUNQ) == 0) {
	    need_lwkt_resched();	/* prevent hlt */
	    goto haveidle;
	}
#if defined(INVARIANTS) && defined(__amd64__)
	if ((read_rflags() & PSL_I) == 0) {
	    cpu_enable_intr();
	    panic("lwkt_switch() called with interrupts disabled");
	}
#endif

	/*
	 * Number iterations so far.  After a certain point we switch to
	 * a sorted-address/monitor/mwait version of lwkt_getalltokens()
	 */
	if (spinning < 0x7FFFFFFF)
	    ++spinning;

	/*
	 * lwkt_getalltokens() failed in sorted token mode, we can use
	 * monitor/mwait in this case.
	 */
	if (spinning >= lwkt_spin_loops &&
	    (cpu_mi_feature & CPU_MI_MONITOR) &&
	    lwkt_spin_monitor)
	{
	    cpu_mmw_pause_int(&gd->gd_reqflags,
			      (gd->gd_reqflags | RQF_SPINNING) &
			      ~RQF_IDLECHECK_WK_MASK);
	}

	/*
	 * We already checked that td is still scheduled so this should be
	 * safe.
	 */
	splz_check();

	/*
	 * This experimental resequencer is used as a fall-back to reduce
	 * hw cache line contention by placing each core's scheduler into a
	 * time-domain-multiplexed slot.
	 *
	 * The resequencer is disabled by default.  Its functionality has
	 * largely been superseded by the token algorithm which limits races
	 * to a subset of cores.
	 *
	 * The resequencer algorithm tends to break down when more than
	 * 20 cores are contending.  What appears to happen is that new
	 * tokens can be obtained out of address-sorted order by new cores
	 * while existing cores languish in long delays between retries and
	 * wind up being starved-out of the token acquisition.
	 */
	if (lwkt_spin_reseq && spinning >= lwkt_spin_reseq) {
	    int cseq = atomic_fetchadd_int(&lwkt_cseq_windex, 1);
	    int oseq;

	    while ((oseq = lwkt_cseq_rindex) != cseq) {
		cpu_ccfence();
#if 1
		if (cpu_mi_feature & CPU_MI_MONITOR) {
		    cpu_mmw_pause_int(&lwkt_cseq_rindex, oseq);
		} else {
#endif
		    cpu_pause();
		    cpu_lfence();
#if 1
		}
#endif
	    }
	    DELAY(1);
	    atomic_add_int(&lwkt_cseq_rindex, 1);
	}
	/* highest level for(;;) loop */
    }

havethread:
    /*
     * Clear gd_idle_repeat when doing a normal switch to a non-idle
     * thread.
     */
    ntd->td_wmesg = NULL;
    ++gd->gd_cnt.v_swtch;
    gd->gd_idle_repeat = 0;

havethread_preempted:
    /*
     * If the new target does not need the MP lock and we are holding it,
     * release the MP lock.  If the new target requires the MP lock we have
     * already acquired it for the target.
     */
    ;
haveidle:
    KASSERT(ntd->td_critcount,
	    ("priority problem in lwkt_switch %d %d",
	     td->td_critcount, ntd->td_critcount));

    if (td != ntd) {
	/*
	 * Execute the actual thread switch operation.  This function
	 * returns to the current thread and returns the previous thread
	 * (which may be different from the thread we switched to).
	 *
	 * We are responsible for marking ntd as TDF_RUNNING.
	 */
	KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
	++switch_count;
	KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd);
	ntd->td_flags |= TDF_RUNNING;
	lwkt_switch_return(td->td_switch(ntd));
	/* ntd invalid, td_switch() can return a different thread_t */
    }

    /*
     * catch-all.  XXX is this strictly needed?
877 */ 878 splz_check(); 879 880 /* NOTE: current cpu may have changed after switch */ 881 crit_exit_quick(td); 882 } 883 884 /* 885 * Called by assembly in the td_switch (thread restore path) for thread 886 * bootstrap cases which do not 'return' to lwkt_switch(). 887 */ 888 void 889 lwkt_switch_return(thread_t otd) 890 { 891 globaldata_t rgd; 892 893 /* 894 * Check if otd was migrating. Now that we are on ntd we can finish 895 * up the migration. This is a bit messy but it is the only place 896 * where td is known to be fully descheduled. 897 * 898 * We can only activate the migration if otd was migrating but not 899 * held on the cpu due to a preemption chain. We still have to 900 * clear TDF_RUNNING on the old thread either way. 901 * 902 * We are responsible for clearing the previously running thread's 903 * TDF_RUNNING. 904 */ 905 if ((rgd = otd->td_migrate_gd) != NULL && 906 (otd->td_flags & TDF_PREEMPT_LOCK) == 0) { 907 KKASSERT((otd->td_flags & (TDF_MIGRATING | TDF_RUNNING)) == 908 (TDF_MIGRATING | TDF_RUNNING)); 909 otd->td_migrate_gd = NULL; 910 otd->td_flags &= ~TDF_RUNNING; 911 lwkt_send_ipiq(rgd, lwkt_setcpu_remote, otd); 912 } else { 913 otd->td_flags &= ~TDF_RUNNING; 914 } 915 916 /* 917 * Final exit validations (see lwp_wait()). Note that otd becomes 918 * invalid the *instant* we set TDF_MP_EXITSIG. 919 */ 920 while (otd->td_flags & TDF_EXITING) { 921 u_int mpflags; 922 923 mpflags = otd->td_mpflags; 924 cpu_ccfence(); 925 926 if (mpflags & TDF_MP_EXITWAIT) { 927 if (atomic_cmpset_int(&otd->td_mpflags, mpflags, 928 mpflags | TDF_MP_EXITSIG)) { 929 wakeup(otd); 930 break; 931 } 932 } else { 933 if (atomic_cmpset_int(&otd->td_mpflags, mpflags, 934 mpflags | TDF_MP_EXITSIG)) { 935 wakeup(otd); 936 break; 937 } 938 } 939 } 940 } 941 942 /* 943 * Request that the target thread preempt the current thread. Preemption 944 * can only occur if our only critical section is the one that we were called 945 * with, the relative priority of the target thread is higher, and the target 946 * thread holds no tokens. This also only works if we are not holding any 947 * spinlocks (obviously). 948 * 949 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION. Typically 950 * this is called via lwkt_schedule() through the td_preemptable callback. 951 * critcount is the managed critical priority that we should ignore in order 952 * to determine whether preemption is possible (aka usually just the crit 953 * priority of lwkt_schedule() itself). 954 * 955 * Preemption is typically limited to interrupt threads. 956 * 957 * Operation works in a fairly straight-forward manner. The normal 958 * scheduling code is bypassed and we switch directly to the target 959 * thread. When the target thread attempts to block or switch away 960 * code at the base of lwkt_switch() will switch directly back to our 961 * thread. Our thread is able to retain whatever tokens it holds and 962 * if the target needs one of them the target will switch back to us 963 * and reschedule itself normally. 964 */ 965 void 966 lwkt_preempt(thread_t ntd, int critcount) 967 { 968 struct globaldata *gd = mycpu; 969 thread_t xtd; 970 thread_t td; 971 int save_gd_intr_nesting_level; 972 973 /* 974 * The caller has put us in a critical section. We can only preempt 975 * if the caller of the caller was not in a critical section (basically 976 * a local interrupt), as determined by the 'critcount' parameter. We 977 * also can't preempt if the caller is holding any spinlocks (even if 978 * he isn't in a critical section). 
This also handles the tokens test. 979 * 980 * YYY The target thread must be in a critical section (else it must 981 * inherit our critical section? I dunno yet). 982 */ 983 KASSERT(ntd->td_critcount, ("BADCRIT0 %d", ntd->td_pri)); 984 985 td = gd->gd_curthread; 986 if (preempt_enable == 0) { 987 ++preempt_miss; 988 return; 989 } 990 if (ntd->td_pri <= td->td_pri) { 991 ++preempt_miss; 992 return; 993 } 994 if (td->td_critcount > critcount) { 995 ++preempt_miss; 996 return; 997 } 998 if (td->td_cscount) { 999 ++preempt_miss; 1000 return; 1001 } 1002 if (ntd->td_gd != gd) { 1003 ++preempt_miss; 1004 return; 1005 } 1006 /* 1007 * We don't have to check spinlocks here as they will also bump 1008 * td_critcount. 1009 * 1010 * Do not try to preempt if the target thread is holding any tokens. 1011 * We could try to acquire the tokens but this case is so rare there 1012 * is no need to support it. 1013 */ 1014 KKASSERT(gd->gd_spinlocks == 0); 1015 1016 if (TD_TOKS_HELD(ntd)) { 1017 ++preempt_miss; 1018 return; 1019 } 1020 if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) { 1021 ++preempt_weird; 1022 return; 1023 } 1024 if (ntd->td_preempted) { 1025 ++preempt_hit; 1026 return; 1027 } 1028 KKASSERT(gd->gd_processing_ipiq == 0); 1029 1030 /* 1031 * Since we are able to preempt the current thread, there is no need to 1032 * call need_lwkt_resched(). 1033 * 1034 * We must temporarily clear gd_intr_nesting_level around the switch 1035 * since switchouts from the target thread are allowed (they will just 1036 * return to our thread), and since the target thread has its own stack. 1037 * 1038 * A preemption must switch back to the original thread, assert the 1039 * case. 1040 */ 1041 ++preempt_hit; 1042 ntd->td_preempted = td; 1043 td->td_flags |= TDF_PREEMPT_LOCK; 1044 KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd); 1045 save_gd_intr_nesting_level = gd->gd_intr_nesting_level; 1046 gd->gd_intr_nesting_level = 0; 1047 1048 KKASSERT((ntd->td_flags & TDF_RUNNING) == 0); 1049 ntd->td_flags |= TDF_RUNNING; 1050 xtd = td->td_switch(ntd); 1051 KKASSERT(xtd == ntd); 1052 lwkt_switch_return(xtd); 1053 gd->gd_intr_nesting_level = save_gd_intr_nesting_level; 1054 1055 KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE)); 1056 ntd->td_preempted = NULL; 1057 td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE); 1058 } 1059 1060 /* 1061 * Conditionally call splz() if gd_reqflags indicates work is pending. 1062 * This will work inside a critical section but not inside a hard code 1063 * section. 1064 * 1065 * (self contained on a per cpu basis) 1066 */ 1067 void 1068 splz_check(void) 1069 { 1070 globaldata_t gd = mycpu; 1071 thread_t td = gd->gd_curthread; 1072 1073 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && 1074 gd->gd_intr_nesting_level == 0 && 1075 td->td_nest_count < 2) 1076 { 1077 splz(); 1078 } 1079 } 1080 1081 /* 1082 * This version is integrated into crit_exit, reqflags has already 1083 * been tested but td_critcount has not. 1084 * 1085 * We only want to execute the splz() on the 1->0 transition of 1086 * critcount and not in a hard code section or if too deeply nested. 1087 * 1088 * NOTE: gd->gd_spinlocks is implied to be 0 when td_critcount is 0. 
1089 */ 1090 void 1091 lwkt_maybe_splz(thread_t td) 1092 { 1093 globaldata_t gd = td->td_gd; 1094 1095 if (td->td_critcount == 0 && 1096 gd->gd_intr_nesting_level == 0 && 1097 td->td_nest_count < 2) 1098 { 1099 splz(); 1100 } 1101 } 1102 1103 /* 1104 * Drivers which set up processing co-threads can call this function to 1105 * run the co-thread at a higher priority and to allow it to preempt 1106 * normal threads. 1107 */ 1108 void 1109 lwkt_set_interrupt_support_thread(void) 1110 { 1111 thread_t td = curthread; 1112 1113 lwkt_setpri_self(TDPRI_INT_SUPPORT); 1114 td->td_flags |= TDF_INTTHREAD; 1115 td->td_preemptable = lwkt_preempt; 1116 } 1117 1118 1119 /* 1120 * This function is used to negotiate a passive release of the current 1121 * process/lwp designation with the user scheduler, allowing the user 1122 * scheduler to schedule another user thread. The related kernel thread 1123 * (curthread) continues running in the released state. 1124 */ 1125 void 1126 lwkt_passive_release(struct thread *td) 1127 { 1128 struct lwp *lp = td->td_lwp; 1129 1130 #ifndef NO_LWKT_SPLIT_USERPRI 1131 td->td_release = NULL; 1132 lwkt_setpri_self(TDPRI_KERN_USER); 1133 #endif 1134 1135 lp->lwp_proc->p_usched->release_curproc(lp); 1136 } 1137 1138 1139 /* 1140 * This implements a LWKT yield, allowing a kernel thread to yield to other 1141 * kernel threads at the same or higher priority. This function can be 1142 * called in a tight loop and will typically only yield once per tick. 1143 * 1144 * Most kernel threads run at the same priority in order to allow equal 1145 * sharing. 1146 * 1147 * (self contained on a per cpu basis) 1148 */ 1149 void 1150 lwkt_yield(void) 1151 { 1152 globaldata_t gd = mycpu; 1153 thread_t td = gd->gd_curthread; 1154 1155 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2) 1156 splz(); 1157 if (lwkt_resched_wanted()) { 1158 lwkt_schedule_self(curthread); 1159 lwkt_switch(); 1160 } 1161 } 1162 1163 /* 1164 * The quick version processes pending interrupts and higher-priority 1165 * LWKT threads but will not round-robin same-priority LWKT threads. 1166 * 1167 * When called while attempting to return to userland the only same-pri 1168 * threads are the ones which have already tried to become the current 1169 * user process. 1170 */ 1171 void 1172 lwkt_yield_quick(void) 1173 { 1174 globaldata_t gd = mycpu; 1175 thread_t td = gd->gd_curthread; 1176 1177 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2) 1178 splz(); 1179 if (lwkt_resched_wanted()) { 1180 if (TAILQ_FIRST(&gd->gd_tdrunq) == td) { 1181 clear_lwkt_resched(); 1182 } else { 1183 lwkt_schedule_self(curthread); 1184 lwkt_switch(); 1185 } 1186 } 1187 } 1188 1189 /* 1190 * This yield is designed for kernel threads with a user context. 1191 * 1192 * The kernel acting on behalf of the user is potentially cpu-bound, 1193 * this function will efficiently allow other threads to run and also 1194 * switch to other processes by releasing. 1195 * 1196 * The lwkt_user_yield() function is designed to have very low overhead 1197 * if no yield is determined to be needed. 1198 */ 1199 void 1200 lwkt_user_yield(void) 1201 { 1202 globaldata_t gd = mycpu; 1203 thread_t td = gd->gd_curthread; 1204 1205 /* 1206 * Always run any pending interrupts in case we are in a critical 1207 * section. 
1208 */ 1209 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2) 1210 splz(); 1211 1212 /* 1213 * Switch (which forces a release) if another kernel thread needs 1214 * the cpu, if userland wants us to resched, or if our kernel 1215 * quantum has run out. 1216 */ 1217 if (lwkt_resched_wanted() || 1218 user_resched_wanted()) 1219 { 1220 lwkt_switch(); 1221 } 1222 1223 #if 0 1224 /* 1225 * Reacquire the current process if we are released. 1226 * 1227 * XXX not implemented atm. The kernel may be holding locks and such, 1228 * so we want the thread to continue to receive cpu. 1229 */ 1230 if (td->td_release == NULL && lp) { 1231 lp->lwp_proc->p_usched->acquire_curproc(lp); 1232 td->td_release = lwkt_passive_release; 1233 lwkt_setpri_self(TDPRI_USER_NORM); 1234 } 1235 #endif 1236 } 1237 1238 /* 1239 * Generic schedule. Possibly schedule threads belonging to other cpus and 1240 * deal with threads that might be blocked on a wait queue. 1241 * 1242 * We have a little helper inline function which does additional work after 1243 * the thread has been enqueued, including dealing with preemption and 1244 * setting need_lwkt_resched() (which prevents the kernel from returning 1245 * to userland until it has processed higher priority threads). 1246 * 1247 * It is possible for this routine to be called after a failed _enqueue 1248 * (due to the target thread migrating, sleeping, or otherwise blocked). 1249 * We have to check that the thread is actually on the run queue! 1250 */ 1251 static __inline 1252 void 1253 _lwkt_schedule_post(globaldata_t gd, thread_t ntd, int ccount) 1254 { 1255 if (ntd->td_flags & TDF_RUNQ) { 1256 if (ntd->td_preemptable) { 1257 ntd->td_preemptable(ntd, ccount); /* YYY +token */ 1258 } 1259 } 1260 } 1261 1262 static __inline 1263 void 1264 _lwkt_schedule(thread_t td) 1265 { 1266 globaldata_t mygd = mycpu; 1267 1268 KASSERT(td != &td->td_gd->gd_idlethread, 1269 ("lwkt_schedule(): scheduling gd_idlethread is illegal!")); 1270 KKASSERT((td->td_flags & TDF_MIGRATING) == 0); 1271 crit_enter_gd(mygd); 1272 KKASSERT(td->td_lwp == NULL || 1273 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); 1274 1275 if (td == mygd->gd_curthread) { 1276 _lwkt_enqueue(td); 1277 } else { 1278 /* 1279 * If we own the thread, there is no race (since we are in a 1280 * critical section). If we do not own the thread there might 1281 * be a race but the target cpu will deal with it. 1282 */ 1283 if (td->td_gd == mygd) { 1284 _lwkt_enqueue(td); 1285 _lwkt_schedule_post(mygd, td, 1); 1286 } else { 1287 lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0); 1288 } 1289 } 1290 crit_exit_gd(mygd); 1291 } 1292 1293 void 1294 lwkt_schedule(thread_t td) 1295 { 1296 _lwkt_schedule(td); 1297 } 1298 1299 void 1300 lwkt_schedule_noresched(thread_t td) /* XXX not impl */ 1301 { 1302 _lwkt_schedule(td); 1303 } 1304 1305 /* 1306 * When scheduled remotely if frame != NULL the IPIQ is being 1307 * run via doreti or an interrupt then preemption can be allowed. 1308 * 1309 * To allow preemption we have to drop the critical section so only 1310 * one is present in _lwkt_schedule_post. 1311 */ 1312 static void 1313 lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame) 1314 { 1315 thread_t td = curthread; 1316 thread_t ntd = arg; 1317 1318 if (frame && ntd->td_preemptable) { 1319 crit_exit_noyield(td); 1320 _lwkt_schedule(ntd); 1321 crit_enter_quick(td); 1322 } else { 1323 _lwkt_schedule(ntd); 1324 } 1325 } 1326 1327 /* 1328 * Thread migration using a 'Pull' method. 
The thread may or may not be
 * the current thread.  It MUST be descheduled and in a stable state.
 * lwkt_giveaway() must be called on the cpu owning the thread.
 *
 * At any point after lwkt_giveaway() is called, the target cpu may
 * 'pull' the thread by calling lwkt_acquire().
 *
 * We have to make sure the thread is not sitting on a per-cpu tsleep
 * queue or it will blow up when it moves to another cpu.
 *
 * MPSAFE - must be called under very specific conditions.
 */
void
lwkt_giveaway(thread_t td)
{
    globaldata_t gd = mycpu;

    crit_enter_gd(gd);
    if (td->td_flags & TDF_TSLEEPQ)
	tsleep_remove(td);
    KKASSERT(td->td_gd == gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    td->td_flags |= TDF_MIGRATING;
    crit_exit_gd(gd);
}

void
lwkt_acquire(thread_t td)
{
    globaldata_t gd;
    globaldata_t mygd;
    int retry = 10000000;

    KKASSERT(td->td_flags & TDF_MIGRATING);
    gd = td->td_gd;
    mygd = mycpu;
    if (gd != mycpu) {
	cpu_lfence();
	KKASSERT((td->td_flags & TDF_RUNQ) == 0);
	crit_enter_gd(mygd);
	DEBUG_PUSH_INFO("lwkt_acquire");
	while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
	    lwkt_process_ipiq();
	    cpu_lfence();
	    if (--retry == 0) {
		kprintf("lwkt_acquire: stuck: td %p td->td_flags %08x\n",
			td, td->td_flags);
		retry = 10000000;
	    }
	}
	DEBUG_POP_INFO();
	cpu_mfence();
	td->td_gd = mygd;
	TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
	td->td_flags &= ~TDF_MIGRATING;
	crit_exit_gd(mygd);
    } else {
	crit_enter_gd(mygd);
	TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
	td->td_flags &= ~TDF_MIGRATING;
	crit_exit_gd(mygd);
    }
}

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
	_lwkt_dequeue(td);
    } else {
	if (td->td_gd == mycpu) {
	    _lwkt_dequeue(td);
	} else {
	    lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
	}
    }
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread, LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    if (td->td_pri != pri) {
	KKASSERT(pri >= 0);
	crit_enter();
	if (td->td_flags & TDF_RUNQ) {
	    KKASSERT(td->td_gd == mycpu);
	    _lwkt_dequeue(td);
	    td->td_pri = pri;
	    _lwkt_enqueue(td);
	} else {
	    td->td_pri = pri;
	}
	crit_exit();
    }
}

/*
 * Set the initial priority for a thread prior to it being scheduled for
 * the first time.  The thread MUST NOT be scheduled before or during
 * this call.  The thread may be assigned to a cpu other than the current
 * cpu.
 *
 * Typically used after a thread has been created with TDF_STOPPREQ,
 * and before the thread is initially scheduled.
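 *
 * Illustrative sketch (added; 'mydaemon' is a hypothetical function):
 * create the thread unscheduled, set its initial priority, then
 * schedule it:
 *
 *	lwkt_create(mydaemon, NULL, &td, NULL, TDF_NOSTART, -1, "mydaemon");
 *	lwkt_setpri_initial(td, TDPRI_KERN_DAEMON);
 *	lwkt_schedule(td);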
1446 */ 1447 void 1448 lwkt_setpri_initial(thread_t td, int pri) 1449 { 1450 KKASSERT(pri >= 0); 1451 KKASSERT((td->td_flags & TDF_RUNQ) == 0); 1452 td->td_pri = pri; 1453 } 1454 1455 void 1456 lwkt_setpri_self(int pri) 1457 { 1458 thread_t td = curthread; 1459 1460 KKASSERT(pri >= 0 && pri <= TDPRI_MAX); 1461 crit_enter(); 1462 if (td->td_flags & TDF_RUNQ) { 1463 _lwkt_dequeue(td); 1464 td->td_pri = pri; 1465 _lwkt_enqueue(td); 1466 } else { 1467 td->td_pri = pri; 1468 } 1469 crit_exit(); 1470 } 1471 1472 /* 1473 * hz tick scheduler clock for LWKT threads 1474 */ 1475 void 1476 lwkt_schedulerclock(thread_t td) 1477 { 1478 globaldata_t gd = td->td_gd; 1479 thread_t xtd; 1480 1481 if (TAILQ_FIRST(&gd->gd_tdrunq) == td) { 1482 /* 1483 * If the current thread is at the head of the runq shift it to the 1484 * end of any equal-priority threads and request a LWKT reschedule 1485 * if it moved. 1486 * 1487 * Ignore upri in this situation. There will only be one user thread 1488 * in user mode, all others will be user threads running in kernel 1489 * mode and we have to make sure they get some cpu. 1490 */ 1491 xtd = TAILQ_NEXT(td, td_threadq); 1492 if (xtd && xtd->td_pri == td->td_pri) { 1493 TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq); 1494 while (xtd && xtd->td_pri == td->td_pri) 1495 xtd = TAILQ_NEXT(xtd, td_threadq); 1496 if (xtd) 1497 TAILQ_INSERT_BEFORE(xtd, td, td_threadq); 1498 else 1499 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq); 1500 need_lwkt_resched(); 1501 } 1502 } else { 1503 /* 1504 * If we scheduled a thread other than the one at the head of the 1505 * queue always request a reschedule every tick. 1506 */ 1507 need_lwkt_resched(); 1508 } 1509 } 1510 1511 /* 1512 * Migrate the current thread to the specified cpu. 1513 * 1514 * This is accomplished by descheduling ourselves from the current cpu 1515 * and setting td_migrate_gd. The lwkt_switch() code will detect that the 1516 * 'old' thread wants to migrate after it has been completely switched out 1517 * and will complete the migration. 1518 * 1519 * TDF_MIGRATING prevents scheduling races while the thread is being migrated. 1520 * 1521 * We must be sure to release our current process designation (if a user 1522 * process) before clearing out any tsleepq we are on because the release 1523 * code may re-add us. 1524 * 1525 * We must be sure to remove ourselves from the current cpu's tsleepq 1526 * before potentially moving to another queue. The thread can be on 1527 * a tsleepq due to a left-over tsleep_interlock(). 1528 */ 1529 1530 void 1531 lwkt_setcpu_self(globaldata_t rgd) 1532 { 1533 thread_t td = curthread; 1534 1535 if (td->td_gd != rgd) { 1536 crit_enter_quick(td); 1537 1538 if (td->td_release) 1539 td->td_release(td); 1540 if (td->td_flags & TDF_TSLEEPQ) 1541 tsleep_remove(td); 1542 1543 /* 1544 * Set TDF_MIGRATING to prevent a spurious reschedule while we are 1545 * trying to deschedule ourselves and switch away, then deschedule 1546 * ourself, remove us from tdallq, and set td_migrate_gd. Finally, 1547 * call lwkt_switch() to complete the operation. 
1548 */ 1549 td->td_flags |= TDF_MIGRATING; 1550 lwkt_deschedule_self(td); 1551 TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq); 1552 td->td_migrate_gd = rgd; 1553 lwkt_switch(); 1554 1555 /* 1556 * We are now on the target cpu 1557 */ 1558 KKASSERT(rgd == mycpu); 1559 TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq); 1560 crit_exit_quick(td); 1561 } 1562 } 1563 1564 void 1565 lwkt_migratecpu(int cpuid) 1566 { 1567 globaldata_t rgd; 1568 1569 rgd = globaldata_find(cpuid); 1570 lwkt_setcpu_self(rgd); 1571 } 1572 1573 /* 1574 * Remote IPI for cpu migration (called while in a critical section so we 1575 * do not have to enter another one). 1576 * 1577 * The thread (td) has already been completely descheduled from the 1578 * originating cpu and we can simply assert the case. The thread is 1579 * assigned to the new cpu and enqueued. 1580 * 1581 * The thread will re-add itself to tdallq when it resumes execution. 1582 */ 1583 static void 1584 lwkt_setcpu_remote(void *arg) 1585 { 1586 thread_t td = arg; 1587 globaldata_t gd = mycpu; 1588 1589 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0); 1590 td->td_gd = gd; 1591 cpu_mfence(); 1592 td->td_flags &= ~TDF_MIGRATING; 1593 KKASSERT(td->td_migrate_gd == NULL); 1594 KKASSERT(td->td_lwp == NULL || 1595 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0); 1596 _lwkt_enqueue(td); 1597 } 1598 1599 struct lwp * 1600 lwkt_preempted_proc(void) 1601 { 1602 thread_t td = curthread; 1603 while (td->td_preempted) 1604 td = td->td_preempted; 1605 return(td->td_lwp); 1606 } 1607 1608 /* 1609 * Create a kernel process/thread/whatever. It shares it's address space 1610 * with proc0 - ie: kernel only. 1611 * 1612 * If the cpu is not specified one will be selected. In the future 1613 * specifying a cpu of -1 will enable kernel thread migration between 1614 * cpus. 1615 */ 1616 int 1617 lwkt_create(void (*func)(void *), void *arg, struct thread **tdp, 1618 thread_t template, int tdflags, int cpu, const char *fmt, ...) 1619 { 1620 thread_t td; 1621 __va_list ap; 1622 1623 td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu, 1624 tdflags); 1625 if (tdp) 1626 *tdp = td; 1627 cpu_set_thread_handler(td, lwkt_exit, func, arg); 1628 1629 /* 1630 * Set up arg0 for 'ps' etc 1631 */ 1632 __va_start(ap, fmt); 1633 kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap); 1634 __va_end(ap); 1635 1636 /* 1637 * Schedule the thread to run 1638 */ 1639 if (td->td_flags & TDF_NOSTART) 1640 td->td_flags &= ~TDF_NOSTART; 1641 else 1642 lwkt_schedule(td); 1643 return 0; 1644 } 1645 1646 /* 1647 * Destroy an LWKT thread. Warning! This function is not called when 1648 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and 1649 * uses a different reaping mechanism. 1650 */ 1651 void 1652 lwkt_exit(void) 1653 { 1654 thread_t td = curthread; 1655 thread_t std; 1656 globaldata_t gd; 1657 1658 /* 1659 * Do any cleanup that might block here 1660 */ 1661 if (td->td_flags & TDF_VERBOSE) 1662 kprintf("kthread %p %s has exited\n", td, td->td_comm); 1663 biosched_done(td); 1664 dsched_exit_thread(td); 1665 1666 /* 1667 * Get us into a critical section to interlock gd_freetd and loop 1668 * until we can get it freed. 1669 * 1670 * We have to cache the current td in gd_freetd because objcache_put()ing 1671 * it would rip it out from under us while our thread is still active. 1672 * 1673 * We are the current thread so of course our own TDF_RUNNING bit will 1674 * be set, so unlike the lwp reap code we don't wait for it to clear. 
1675 */ 1676 gd = mycpu; 1677 crit_enter_quick(td); 1678 for (;;) { 1679 if (td->td_refs) { 1680 tsleep(td, 0, "tdreap", 1); 1681 continue; 1682 } 1683 if ((std = gd->gd_freetd) != NULL) { 1684 KKASSERT((std->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0); 1685 gd->gd_freetd = NULL; 1686 objcache_put(thread_cache, std); 1687 continue; 1688 } 1689 break; 1690 } 1691 1692 /* 1693 * Remove thread resources from kernel lists and deschedule us for 1694 * the last time. We cannot block after this point or we may end 1695 * up with a stale td on the tsleepq. 1696 * 1697 * None of this may block, the critical section is the only thing 1698 * protecting tdallq and the only thing preventing new lwkt_hold() 1699 * thread refs now. 1700 */ 1701 if (td->td_flags & TDF_TSLEEPQ) 1702 tsleep_remove(td); 1703 lwkt_deschedule_self(td); 1704 lwkt_remove_tdallq(td); 1705 KKASSERT(td->td_refs == 0); 1706 1707 /* 1708 * Final cleanup 1709 */ 1710 KKASSERT(gd->gd_freetd == NULL); 1711 if (td->td_flags & TDF_ALLOCATED_THREAD) 1712 gd->gd_freetd = td; 1713 cpu_thread_exit(); 1714 } 1715 1716 void 1717 lwkt_remove_tdallq(thread_t td) 1718 { 1719 KKASSERT(td->td_gd == mycpu); 1720 TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq); 1721 } 1722 1723 /* 1724 * Code reduction and branch prediction improvements. Call/return 1725 * overhead on modern cpus often degenerates into 0 cycles due to 1726 * the cpu's branch prediction hardware and return pc cache. We 1727 * can take advantage of this by not inlining medium-complexity 1728 * functions and we can also reduce the branch prediction impact 1729 * by collapsing perfectly predictable branches into a single 1730 * procedure instead of duplicating it. 1731 * 1732 * Is any of this noticeable? Probably not, so I'll take the 1733 * smaller code size. 1734 */ 1735 void 1736 crit_exit_wrapper(__DEBUG_CRIT_ARG__) 1737 { 1738 _crit_exit(mycpu __DEBUG_CRIT_PASS_ARG__); 1739 } 1740 1741 void 1742 crit_panic(void) 1743 { 1744 thread_t td = curthread; 1745 int lcrit = td->td_critcount; 1746 1747 td->td_critcount = 0; 1748 panic("td_critcount is/would-go negative! %p %d", td, lcrit); 1749 /* NOT REACHED */ 1750 } 1751 1752 /* 1753 * Called from debugger/panic on cpus which have been stopped. We must still 1754 * process the IPIQ while stopped, even if we were stopped while in a critical 1755 * section (XXX). 1756 * 1757 * If we are dumping also try to process any pending interrupts. This may 1758 * or may not work depending on the state of the cpu at the point it was 1759 * stopped. 1760 */ 1761 void 1762 lwkt_smp_stopped(void) 1763 { 1764 globaldata_t gd = mycpu; 1765 1766 crit_enter_gd(gd); 1767 if (dumping) { 1768 lwkt_process_ipiq(); 1769 splz(); 1770 } else { 1771 lwkt_process_ipiq(); 1772 } 1773 crit_exit_gd(gd); 1774 } 1775
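
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * source; helper names are hypothetical): a long-running kernel thread
 * created with lwkt_create() typically calls lwkt_yield() in its work
 * loop so same-priority LWKTs are not starved:
 *
 *	lwkt_create(my_worker, NULL, &td, NULL, 0, -1, "my_worker");
 *
 *	static void
 *	my_worker(void *arg)
 *	{
 *		for (;;) {
 *			do_unit_of_work();
 *			lwkt_yield();
 *		}
 *	}
 */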