/*
 * Copyright (c) 2003-2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>
#include <sys/spinlock.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#if !defined(KTR_CTXSW)
#define KTR_CTXSW KTR_ALL
#endif
KTR_INFO_MASTER(ctxsw);
KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p",
    sizeof(int) + sizeof(struct thread *));
KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p",
    sizeof(int) + sizeof(struct thread *));
KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s",
    sizeof(struct thread *) + sizeof(char *));
KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = <dead>",
    sizeof(struct thread *));

static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");

#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
static __int64_t switch_count = 0;
static __int64_t preempt_hit = 0;
static __int64_t preempt_miss = 0;
static __int64_t preempt_weird = 0;
static __int64_t token_contention_count __debugvar = 0;
static int lwkt_use_spin_port;
static struct objcache *thread_cache;

#ifdef SMP
static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
#endif

extern void cpu_heavy_restore(void);
extern void cpu_lwkt_restore(void);
extern void cpu_kthread_restore(void);
extern void cpu_idle_restore(void);

#ifdef __x86_64__

static int
jg_tos_ok(struct thread *td)
{
    void *tos;
    int tos_ok;

    if (td == NULL) {
        return 1;
    }
    KKASSERT(td->td_sp != NULL);
    tos = ((void **)td->td_sp)[0];
    tos_ok = 0;
    if ((tos == cpu_heavy_restore) || (tos == cpu_lwkt_restore) ||
        (tos == cpu_kthread_restore) || (tos == cpu_idle_restore)) {
        tos_ok = 1;
    }
    return tos_ok;
}

#endif

/*
 * We can make all thread ports use the spin backend instead of the thread
 * backend.  This should only be set to debug the spin backend.
 */
TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, "");
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0,
    "Successful preemption events");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0,
    "Failed preemption events");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");
#ifdef INVARIANTS
SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count, CTLFLAG_RW,
    &token_contention_count, 0, "spinning due to token contention");
#endif

/*
 * These helper procedures handle the runq; they can only be called from
 * within a critical section.
 *
 * WARNING! Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueuing and dequeuing only occur on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq);
        /* runqmask is passively cleaned up by the switcher */
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags |= TDF_RUNQ;
        TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq);
        gd->gd_runqmask |= 1 << nq;
    }
}
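
/*
 * Illustrative note (not in the original sources): gd_runqmask keeps one
 * bit per priority run queue.  _lwkt_enqueue() sets the bit and
 * lwkt_switch() picks the highest-priority non-empty queue with bsrl(),
 * which returns the index of the highest set bit.  A rough sketch,
 * assuming threads are queued at priorities 2 and 5:
 *
 *      gd->gd_runqmask == (1 << 2) | (1 << 5) == 0x24
 *      nq  = bsrl(gd->gd_runqmask);           -> 5 (highest priority wins)
 *      ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq]); -> first priority-5 thread
 *
 * Stale bits are not cleared on dequeue; lwkt_switch() clears a bit when
 * it finds the corresponding queue empty.
 */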

static __boolean_t
_lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
{
    struct thread *td = (struct thread *)obj;

    td->td_kstack = NULL;
    td->td_kstack_size = 0;
    td->td_flags = TDF_ALLOCATED_THREAD;
    return (1);
}

static void
_lwkt_thread_dtor(void *obj, void *privdata)
{
    struct thread *td = (struct thread *)obj;

    KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
        ("_lwkt_thread_dtor: not allocated from objcache"));
    KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
        td->td_kstack_size > 0,
        ("_lwkt_thread_dtor: corrupted stack"));
    kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
}

/*
 * Initialize the lwkt subsystem.
 */
void
lwkt_init(void)
{
    /* An objcache has 2 magazines per CPU so divide cache size by 2. */
    thread_cache = objcache_create_mbacked(M_THREAD, sizeof(struct thread),
                    NULL, CACHE_NTHREADS/2,
                    _lwkt_thread_ctor, _lwkt_thread_dtor, NULL);
}

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
    crit_exit_quick(td);
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(thread_t td)
{
    crit_enter_quick(td);
    _lwkt_dequeue(td);
    crit_exit_quick(td);
}

/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    int i;

    for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
        TAILQ_INIT(&gd->gd_tdrunq[i]);
    gd->gd_runqmask = 0;
    TAILQ_INIT(&gd->gd_tdallq);
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 the thread will be created on the current cpu.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
{
    globaldata_t gd = mycpu;
    void *stack;

    /*
     * If static thread storage is not supplied allocate a thread.  Reuse
     * a cached free thread if possible.  gd_freetd is used to keep an
     * exiting thread intact through the exit.
     */
    if (td == NULL) {
        if ((td = gd->gd_freetd) != NULL)
            gd->gd_freetd = NULL;
        else
            td = objcache_get(thread_cache, M_WAITOK);
        KASSERT((td->td_flags &
                 (TDF_ALLOCATED_THREAD|TDF_RUNNING)) == TDF_ALLOCATED_THREAD,
            ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags));
        flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
    }

    /*
     * Try to reuse cached stack.
     */
    if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
        if (flags & TDF_ALLOCATED_STACK) {
            kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size);
            stack = NULL;
        }
    }
    if (stack == NULL) {
        stack = (void *)kmem_alloc(&kernel_map, stksize);
        flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0)
        lwkt_init_thread(td, stack, stksize, flags, gd);
    else
        lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
    return(td);
}

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.  This function may send an IPI message when the
 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the originating
 * cpu.
 *
 * NOTE! We have to be careful in regards to creating threads for other cpus
 * if SMP has not yet been activated.
 */
#ifdef SMP

static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    /*
     * Protected by critical section held by IPI dispatch
     */
    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

#endif

void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
                 struct globaldata *gd)
{
    globaldata_t mygd = mycpu;

    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_kstack_size = stksize;
    td->td_flags = flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
    td->td_toks_stop = &td->td_toks_base;
#ifdef SMP
    if ((flags & TDF_MPSAFE) == 0)
        td->td_mpcount = 1;
#endif
    if (lwkt_use_spin_port)
        lwkt_initport_spin(&td->td_msgport);
    else
        lwkt_initport_thread(&td->td_msgport, td);
    pmap_init_thread(td);
#ifdef SMP
    /*
     * Normally initializing a thread for a remote cpu requires sending an
     * IPI.  However, the idlethread is setup before the other cpus are
     * activated so we have to treat it as a special case.  XXX manipulation
     * of gd_tdallq requires the BGL.
     */
    if (gd == mygd || td == &gd->gd_idlethread) {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        crit_exit_gd(mygd);
    } else {
        lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
#else
    crit_enter_gd(mygd);
    TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
    crit_exit_gd(mygd);
#endif

    dsched_new_thread(td);
}

void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
    KTR_LOG(ctxsw_newtd, td, &td->td_comm[0]);
}

void
lwkt_hold(thread_t td)
{
    ++td->td_refs;
}

void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    --td->td_refs;
}

void
lwkt_wait_free(thread_t td)
{
    while (td->td_refs)
        tsleep(td, 0, "tdreap", hz);
}

void
lwkt_free_thread(thread_t td)
{
    KASSERT((td->td_flags & TDF_RUNNING) == 0,
        ("lwkt_free_thread: did not exit! %p", td));

    if (td->td_flags & TDF_ALLOCATED_THREAD) {
        objcache_put(thread_cache, td);
    } else if (td->td_flags & TDF_ALLOCATED_STACK) {
        /* client-allocated struct with internally allocated stack */
        KASSERT(td->td_kstack && td->td_kstack_size > 0,
            ("lwkt_free_thread: corrupted stack"));
        kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
        td->td_kstack = NULL;
        td->td_kstack_size = 0;
    }
    KTR_LOG(ctxsw_deadtd, td);
}

/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * The MP lock may be out of sync with the thread's td_mpcount.  lwkt_switch()
 * cleans it up.  Note that the td_switch() function cannot do anything that
 * requires the MP lock since the MP lock will have already been setup for
 * the target thread (not the current thread).  It's nice to have a scheduler
 * that does not need the MP lock to work because it allows us to do some
 * really cool high-performance MP lock optimizations.
 *
 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
 * is not called by the current thread in the preemption case, only when
 * the preempting thread blocks (in order to return to the original thread).
 */
void
lwkt_switch(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    thread_t ntd;
#ifdef SMP
    int mpheld;
#endif

    /*
     * Switching from within a 'fast' (non thread switched) interrupt or IPI
     * is illegal.  However, we may have to do it anyway if we hit a fatal
     * kernel trap or we have panicked.
     *
     * If this case occurs save and restore the interrupt nesting level.
     */
    if (gd->gd_intr_nesting_level) {
        int savegdnest;
        int savegdtrap;

        if (gd->gd_trap_nesting_level == 0 && panicstr == NULL) {
            panic("lwkt_switch: cannot switch from within "
                  "a fast interrupt, yet, td %p\n", td);
        } else {
            savegdnest = gd->gd_intr_nesting_level;
            savegdtrap = gd->gd_trap_nesting_level;
            gd->gd_intr_nesting_level = 0;
            gd->gd_trap_nesting_level = 0;
            if ((td->td_flags & TDF_PANICWARN) == 0) {
                td->td_flags |= TDF_PANICWARN;
                kprintf("Warning: thread switch from interrupt or IPI, "
                        "thread %p (%s)\n", td, td->td_comm);
                print_backtrace(-1);
            }
            lwkt_switch();
            gd->gd_intr_nesting_level = savegdnest;
            gd->gd_trap_nesting_level = savegdtrap;
            return;
        }
    }

    /*
     * Passive release (used to transition from user to kernel mode
     * when we block or switch rather than when we enter the kernel).
     * This function is NOT called if we are switching into a preemption
     * or returning from a preemption.  Typically this causes us to lose
     * our current process designation (if we have one) and become a true
     * LWKT thread, and may also hand the current process designation to
     * another process and schedule that thread.
     */
    if (td->td_release)
        td->td_release(td);

    crit_enter_gd(gd);
    if (TD_TOKS_HELD(td))
        lwkt_relalltokens(td);

    /*
     * We had better not be holding any spin locks, but don't get into an
     * endless panic loop.
     */
    KASSERT(gd->gd_spinlock_rd == NULL || panicstr != NULL,
        ("lwkt_switch: still holding a shared spinlock %p!",
         gd->gd_spinlock_rd));
    KASSERT(gd->gd_spinlocks_wr == 0 || panicstr != NULL,
        ("lwkt_switch: still holding %d exclusive spinlocks!",
         gd->gd_spinlocks_wr));

#ifdef SMP
    /*
     * td_mpcount cannot be used to determine if we currently hold the
     * MP lock because get_mplock() will increment it prior to attempting
     * to get the lock, and switch out if it can't.  Our ownership of
     * the actual lock will remain stable while we are in a critical section
     * (but, of course, another cpu may own or release the lock so the
     * actual value of mp_lock is not stable).
     */
    mpheld = MP_LOCK_HELD();
#ifdef INVARIANTS
    if (td->td_cscount) {
        kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
                td);
        if (panic_on_cscount)
            panic("switching while mastering cpusync");
    }
#endif
#endif
    if ((ntd = td->td_preempted) != NULL) {
        /*
         * We had preempted another thread on this cpu, resume the preempted
         * thread.  This occurs transparently, whether the preempted thread
         * was scheduled or not (it may have been preempted after descheduling
         * itself).
         *
         * We have to setup the MP lock for the original thread after backing
         * out the adjustment that was made to curthread when the original
         * was preempted.
         */
        KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
#ifdef SMP
        if (ntd->td_mpcount && mpheld == 0) {
            panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d",
                  td, ntd, td->td_mpcount, ntd->td_mpcount);
        }
        if (ntd->td_mpcount) {
            td->td_mpcount -= ntd->td_mpcount;
            KKASSERT(td->td_mpcount >= 0);
        }
#endif
        ntd->td_flags |= TDF_PREEMPT_DONE;

        /*
         * The interrupt may have woken a thread up, we need to properly
         * set the reschedule flag if the originally interrupted thread is
         * at a lower priority.
         */
        if (gd->gd_runqmask > (2 << (ntd->td_pri & TDPRI_MASK)) - 1)
            need_lwkt_resched();
        /* YYY release mp lock on switchback if original doesn't need it */
    } else {
        /*
         * Priority queue / round-robin at each priority.  Note that user
         * processes run at a fixed, low priority and the user process
         * scheduler deals with interactions between user processes
         * by scheduling and descheduling them from the LWKT queue as
         * necessary.
         *
         * We have to adjust the MP lock for the target thread.  If we
         * need the MP lock and cannot obtain it we try to locate a
         * thread that does not need the MP lock.  If we cannot, we spin
         * instead of HLT.
         *
         * A similar issue exists for the tokens held by the target thread.
         * If we cannot obtain ownership of the tokens we cannot immediately
         * schedule the thread.
         */

        /*
         * If an LWKT reschedule was requested, well that is what we are
         * doing now so clear it.
         */
        clear_lwkt_resched();
again:
        if (gd->gd_runqmask) {
            int nq = bsrl(gd->gd_runqmask);
            if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) {
                gd->gd_runqmask &= ~(1 << nq);
                goto again;
            }
#ifdef SMP
            /*
             * THREAD SELECTION FOR AN SMP MACHINE BUILD
             *
             * If the target needs the MP lock and we couldn't get it,
             * or if the target is holding tokens and we could not
             * gain ownership of the tokens, continue looking for a
             * thread to schedule and spin instead of HLT if we can't.
             *
             * NOTE: the mpheld variable is invalid after this conditional, it
             * can change due to both cpu_try_mplock() returning success
             * AND interactions in lwkt_getalltokens() due to the fact that
             * we are trying to check the mpcount of a thread other than
             * the current thread.  Because of this, if the current thread
             * is not holding td_mpcount, an IPI indirectly run via
             * lwkt_getalltokens() can obtain and release the MP lock and
             * cause the core MP lock to be released.
             */
            if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) ||
                (TD_TOKS_HELD(ntd) && lwkt_getalltokens(ntd) == 0)
            ) {
                u_int32_t rqmask = gd->gd_runqmask;

                cpu_pause();

                mpheld = MP_LOCK_HELD();
                ntd = NULL;
                while (rqmask) {
                    TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
                        if (ntd->td_mpcount && !mpheld && !cpu_try_mplock()) {
                            /* spinning due to MP lock being held */
                            continue;
                        }

                        /*
                         * mpheld state invalid after getalltokens call returns
                         * failure, but the variable is only needed for
                         * the loop.
                         */
                        if (TD_TOKS_HELD(ntd) && !lwkt_getalltokens(ntd)) {
                            /* spinning due to token contention */
#ifdef INVARIANTS
                            ++token_contention_count;
#endif
                            mpheld = MP_LOCK_HELD();
                            continue;
                        }
                        break;
                    }
                    if (ntd)
                        break;
                    rqmask &= ~(1 << nq);
                    nq = bsrl(rqmask);

                    /*
                     * We have two choices.  We can either refuse to run a
                     * user thread when a kernel thread needs the MP lock
                     * but could not get it, or we can allow it to run but
                     * then expect an IPI (hopefully) later on to force a
                     * reschedule when the MP lock might become available.
                     */
                    if (nq < TDPRI_KERN_LPSCHED) {
                        break;      /* for now refuse to run */
#if 0
                        if (chain_mplock == 0)
                            break;
                        /* continue loop, allow user threads to be scheduled */
#endif
                    }
                }

                /*
                 * Case where a (kernel) thread needed the MP lock and could
                 * not get one, and we may or may not have found another
                 * thread which does not need the MP lock to run while
                 * we wait (ntd).
                 */
                if (ntd == NULL) {
                    ntd = &gd->gd_idlethread;
                    ntd->td_flags |= TDF_IDLE_NOHLT;
                    set_mplock_contention_mask(gd);
                    cpu_mplock_contested();
                    goto using_idle_thread;
                } else {
                    clr_mplock_contention_mask(gd);
                    ++gd->gd_cnt.v_swtch;
                    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
                }
            } else {
                clr_mplock_contention_mask(gd);
                ++gd->gd_cnt.v_swtch;
                TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
            }
#else
            /*
             * THREAD SELECTION FOR A UP MACHINE BUILD.  We don't have to
             * worry about tokens or the BGL.  However, we still have
             * to call lwkt_getalltokens() in order to properly detect
             * stale tokens.  This call cannot fail for a UP build!
             */
            lwkt_getalltokens(ntd);
            ++gd->gd_cnt.v_swtch;
            TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
#endif
        } else {
            /*
             * We have nothing to run; only let the idle loop halt
             * the cpu if there are no pending interrupts.
             */
            ntd = &gd->gd_idlethread;
            if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
                ntd->td_flags |= TDF_IDLE_NOHLT;
#ifdef SMP
using_idle_thread:
            /*
             * The idle thread should not be holding the MP lock unless we
             * are trapping in the kernel or in a panic.  Since we select the
             * idle thread unconditionally when no other thread is available,
             * if the MP lock is desired during a panic or kernel trap, we
             * have to loop in the scheduler until we get it.
             */
            if (ntd->td_mpcount) {
                mpheld = MP_LOCK_HELD();
                if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
                    panic("Idle thread %p was holding the BGL!", ntd);
                if (mpheld == 0)
                    goto again;
            }
#endif
        }
    }
    KASSERT(ntd->td_pri >= TDPRI_CRIT,
        ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));

    /*
     * Do the actual switch.  If the new target does not need the MP lock
     * and we are holding it, release the MP lock.  If the new target requires
     * the MP lock we have already acquired it for the target.
     */
#ifdef SMP
    if (ntd->td_mpcount == 0) {
        if (MP_LOCK_HELD())
            cpu_rel_mplock();
    } else {
        ASSERT_MP_LOCK_HELD(ntd);
    }
#endif
    if (td != ntd) {
        ++switch_count;
#ifdef __x86_64__
        {
            int tos_ok __debugvar = jg_tos_ok(ntd);
            KKASSERT(tos_ok);
        }
#endif
        KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd);
        td->td_switch(ntd);
    }
    /* NOTE: current cpu may have changed after switch */
    crit_exit_quick(td);
}

/*
 * Request that the target thread preempt the current thread.  Preemption
 * only works under a specific set of conditions:
 *
 *      - We are not preempting ourselves
 *      - The target thread is owned by the current cpu
 *      - We are not currently being preempted
 *      - The target is not currently being preempted
 *      - We are not holding any spin locks
 *      - The target thread is not holding any tokens
 *      - We are able to satisfy the target's MP lock requirements (if any).
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critpri is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * XXX at the moment we run the target thread in a critical section during
 * the preemption in order to prevent the target from taking interrupts
 * that *WE* can't.  Preemption is strictly limited to interrupt threads
 * and interrupt-like threads, outside of a critical section, and the
 * preempted source thread will be resumed the instant the target blocks
 * whether or not the source is scheduled (i.e. preemption is supposed to
 * be as transparent as possible).
 *
 * The target thread inherits our MP count (added to its own) for the
 * duration of the preemption in order to preserve the atomicity of the
 * MP lock during the preemption.  Therefore, any preempting targets must be
 * careful in regards to MP assertions.  Note that the MP count may be
 * out of sync with the physical mp_lock, but we do not have to preserve
 * the original ownership of the lock if it was out of sync (that is, we
 * can leave it synchronized on return).
 */
void
lwkt_preempt(thread_t ntd, int critpri)
{
    struct globaldata *gd = mycpu;
    thread_t td;
#ifdef SMP
    int mpheld;
    int savecnt;
#endif

    /*
     * The caller has put us in a critical section.  We can only preempt
     * if the caller of the caller was not in a critical section (basically
     * a local interrupt), as determined by the 'critpri' parameter.  We
     * also can't preempt if the caller is holding any spinlocks (even if
     * it isn't in a critical section).  This also handles the tokens test.
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     *
     * Set need_lwkt_resched() unconditionally for now YYY.
     */
    KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri));

    td = gd->gd_curthread;
    if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) {
        ++preempt_miss;
        return;
    }
    if ((td->td_pri & ~TDPRI_MASK) > critpri) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    if (ntd->td_gd != gd) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif
    /*
     * Take the easy way out and do not preempt if we are holding
     * any spinlocks.  We could test whether the thread(s) being
     * preempted interlock against the target thread's tokens and whether
     * we can get all the target thread's tokens, but this situation
     * should not occur very often so it's easier to simply not preempt.
     * Also, plain spinlocks are impossible to figure out at this point so
     * just don't preempt.
     *
     * Do not try to preempt if the target thread is holding any tokens.
     * We could try to acquire the tokens but this case is so rare there
     * is no need to support it.
     */
    if (gd->gd_spinlock_rd || gd->gd_spinlocks_wr) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
    if (TD_TOKS_HELD(ntd)) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
        ++preempt_weird;
        need_lwkt_resched();
        return;
    }
    if (ntd->td_preempted) {
        ++preempt_hit;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    /*
     * NOTE: an interrupt might have occurred just as we were transitioning
     * to or from the MP lock.  In this case td_mpcount will be pre-disposed
     * (non-zero) but not actually synchronized with the actual state of the
     * lock.  We can use it to imply an MP lock requirement for the
     * preemption but we cannot use it to test whether we hold the MP lock
     * or not.
     */
    savecnt = td->td_mpcount;
    mpheld = MP_LOCK_HELD();
    ntd->td_mpcount += td->td_mpcount;
    if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) {
        ntd->td_mpcount -= td->td_mpcount;
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif

    /*
     * Since we are able to preempt the current thread, there is no need to
     * call need_lwkt_resched().
     */
    ++preempt_hit;
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd);
    td->td_switch(ntd);

    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
#ifdef SMP
    KKASSERT(savecnt == td->td_mpcount);
    mpheld = MP_LOCK_HELD();
    if (mpheld && td->td_mpcount == 0)
        cpu_rel_mplock();
    else if (mpheld == 0 && td->td_mpcount)
        panic("lwkt_preempt(): MP lock was not held through");
#endif
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}

/*
 * Conditionally call splz() if gd_reqflags indicates work is pending.
 *
 * td_nest_count prevents deep nesting via splz() or doreti() which
 * might otherwise blow out the kernel stack.  Note that except for
 * this special case, we MUST call splz() here to handle any
 * pending ints, particularly after we switch, or we might accidentally
 * halt the cpu with interrupts pending.
 *
 * (self contained on a per cpu basis)
 */
void
splz_check(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    if (gd->gd_reqflags && td->td_nest_count < 2)
        splz();
}

/*
 * This implements a normal yield which will yield to equal priority
 * threads as well as higher priority threads.  Note that gd_reqflags
 * tests will be handled by the crit_exit() call in lwkt_switch().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    lwkt_schedule_self(curthread);
    lwkt_switch();
}

/*
 * This function is used along with the lwkt_passive_recover() inline
 * by the trap code to negotiate a passive release of the current
 * process/lwp designation with the user scheduler.
 */
void
lwkt_passive_release(struct thread *td)
{
    struct lwp *lp = td->td_lwp;

    td->td_release = NULL;
    lwkt_setpri_self(TDPRI_KERN_USER);
    lp->lwp_proc->p_usched->release_curproc(lp);
}

/*
 * Make a kernel thread act as if it were in user mode with regards
 * to scheduling, to avoid becoming cpu-bound in the kernel.  Kernel
 * loops which may be potentially cpu-bound can call lwkt_user_yield().
 *
 * The lwkt_user_yield() function is designed to have very low overhead
 * if no yield is determined to be needed.
 */
void
lwkt_user_yield(void)
{
    thread_t td = curthread;
    struct lwp *lp = td->td_lwp;

#ifdef SMP
    /*
     * XXX SEVERE TEMPORARY HACK.  A cpu-bound operation running in the
     * kernel can prevent other cpus from servicing interrupt threads
     * which still require the MP lock (which is a lot of them).  This
     * has a chaining effect since if the interrupt is blocked, so is
     * the event, so normal scheduling will not pick up on the problem.
     */
    if (mp_lock_contention_mask && td->td_mpcount) {
        yield_mplock(td);
    }
#endif

    /*
     * Another kernel thread wants the cpu
     */
    if (lwkt_resched_wanted())
        lwkt_switch();

    /*
     * If the user scheduler has asynchronously determined that the current
     * process (when running in user mode) needs to lose the cpu then make
     * sure we are released.
     */
    if (user_resched_wanted()) {
        if (td->td_release)
            td->td_release(td);
    }

    /*
     * If we are released reduce our priority
     */
    if (td->td_release == NULL) {
        if (lwkt_check_resched(td) > 0)
            lwkt_switch();
        if (lp) {
            lp->lwp_proc->p_usched->acquire_curproc(lp);
            td->td_release = lwkt_passive_release;
            lwkt_setpri_self(TDPRI_USER_NORM);
        }
    }
}
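
/*
 * Illustrative usage sketch (not part of the original sources): a long
 * running, potentially cpu-bound kernel loop would typically call
 * lwkt_user_yield() once per iteration so that it competes fairly with
 * user processes instead of monopolizing the cpu.  The worker function
 * and its helpers below are hypothetical:
 *
 *      static void
 *      example_worker(void *arg)
 *      {
 *              while (more_work_to_do(arg)) {
 *                      do_one_unit_of_work(arg);
 *                      lwkt_user_yield();
 *              }
 *      }
 */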

/*
 * Return 0 if no runnable threads are pending at the same or higher
 * priority as the passed thread.
 *
 * Return 1 if runnable threads are pending at the same priority.
 *
 * Return 2 if runnable threads are pending at a higher priority.
 */
int
lwkt_check_resched(thread_t td)
{
    int pri = td->td_pri & TDPRI_MASK;

    if (td->td_gd->gd_runqmask > (2 << pri) - 1)
        return(2);
    if (TAILQ_NEXT(td, td_threadq))
        return(1);
    return(0);
}
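
/*
 * Worked example (not part of the original sources) of the bitmask test
 * above: (2 << pri) - 1 is a mask covering run queues 0..pri, so the
 * comparison is true only when some queue above pri is non-empty.
 * For pri == 3:
 *
 *      (2 << 3) - 1 == 0x0f                    (bits 0-3)
 *      gd_runqmask == 0x08  ->  0x08 > 0x0f is false, no higher queue
 *      gd_runqmask == 0x28  ->  0x28 > 0x0f is true,  queue 5 is pending
 *
 * Threads pending at the same priority are detected separately by the
 * TAILQ_NEXT() check on the thread's run queue link.
 */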

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * We have a little helper inline function which does additional work after
 * the thread has been enqueued, including dealing with preemption and
 * setting need_lwkt_resched() (which prevents the kernel from returning
 * to userland until it has processed higher priority threads).
 *
 * It is possible for this routine to be called after a failed _enqueue
 * (due to the target thread migrating, sleeping, or otherwise blocked).
 * We have to check that the thread is actually on the run queue!
 *
 * reschedok is an optimized constant propagated from lwkt_schedule() or
 * lwkt_schedule_noresched().  By default it is non-zero, causing a
 * reschedule to be requested if the target thread has a higher priority.
 * The port messaging code will set MSG_NORESCHED and cause reschedok to
 * be 0, preventing undesired reschedules.
 */
static __inline
void
_lwkt_schedule_post(globaldata_t gd, thread_t ntd, int cpri, int reschedok)
{
    thread_t otd;

    if (ntd->td_flags & TDF_RUNQ) {
        if (ntd->td_preemptable && reschedok) {
            ntd->td_preemptable(ntd, cpri);     /* YYY +token */
        } else if (reschedok) {
            otd = curthread;
            if ((ntd->td_pri & TDPRI_MASK) > (otd->td_pri & TDPRI_MASK))
                need_lwkt_resched();
        }
    }
}

static __inline
void
_lwkt_schedule(thread_t td, int reschedok)
{
    globaldata_t mygd = mycpu;

    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
    crit_enter_gd(mygd);
    KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
    if (td == mygd->gd_curthread) {
        _lwkt_enqueue(td);
    } else {
        /*
         * If we own the thread, there is no race (since we are in a
         * critical section).  If we do not own the thread there might
         * be a race but the target cpu will deal with it.
         */
#ifdef SMP
        if (td->td_gd == mygd) {
            _lwkt_enqueue(td);
            _lwkt_schedule_post(mygd, td, TDPRI_CRIT, reschedok);
        } else {
            lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);
        }
#else
        _lwkt_enqueue(td);
        _lwkt_schedule_post(mygd, td, TDPRI_CRIT, reschedok);
#endif
    }
    crit_exit_gd(mygd);
}

void
lwkt_schedule(thread_t td)
{
    _lwkt_schedule(td, 1);
}

void
lwkt_schedule_noresched(thread_t td)
{
    _lwkt_schedule(td, 0);
}

#ifdef SMP

/*
 * When scheduled remotely, if frame != NULL the IPIQ is being run via
 * doreti or an interrupt and preemption can be allowed.
 *
 * To allow preemption we have to drop the critical section so only
 * one is present in _lwkt_schedule_post.
 */
static void
lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
{
    thread_t td = curthread;
    thread_t ntd = arg;

    if (frame && ntd->td_preemptable) {
        crit_exit_noyield(td);
        _lwkt_schedule(ntd, 1);
        crit_enter_quick(td);
    } else {
        _lwkt_schedule(ntd, 1);
    }
}

/*
 * Thread migration using a 'Pull' method.  The thread may or may not be
 * the current thread.  It MUST be descheduled and in a stable state.
 * lwkt_giveaway() must be called on the cpu owning the thread.
 *
 * At any point after lwkt_giveaway() is called, the target cpu may
 * 'pull' the thread by calling lwkt_acquire().
 *
 * We have to make sure the thread is not sitting on a per-cpu tsleep
 * queue or it will blow up when it moves to another cpu.
 *
 * MPSAFE - must be called under very specific conditions.
 */
void
lwkt_giveaway(thread_t td)
{
    globaldata_t gd = mycpu;

    crit_enter_gd(gd);
    if (td->td_flags & TDF_TSLEEPQ)
        tsleep_remove(td);
    KKASSERT(td->td_gd == gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    td->td_flags |= TDF_MIGRATING;
    crit_exit_gd(gd);
}

void
lwkt_acquire(thread_t td)
{
    globaldata_t gd;
    globaldata_t mygd;

    KKASSERT(td->td_flags & TDF_MIGRATING);
    gd = td->td_gd;
    mygd = mycpu;
    if (gd != mycpu) {
        cpu_lfence();
        KKASSERT((td->td_flags & TDF_RUNQ) == 0);
        crit_enter_gd(mygd);
        while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
#ifdef SMP
            lwkt_process_ipiq();
#endif
            cpu_lfence();
        }
        td->td_gd = mygd;
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
        td->td_flags &= ~TDF_MIGRATING;
        crit_exit_gd(mygd);
    } else {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
        td->td_flags &= ~TDF_MIGRATING;
        crit_exit_gd(mygd);
    }
}

#endif
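
/*
 * Illustrative sketch (not part of the original sources) of the 'Pull'
 * migration protocol described above.  How the target cpu is notified is
 * hypothetical; the ordering is the point: the owning cpu gives the
 * thread away first, then the target cpu pulls it and reschedules it.
 *
 *      On the cpu that currently owns the (descheduled, stable) thread:
 *
 *              lwkt_giveaway(td);
 *              ... notify the target cpu, e.g. via an IPI ...
 *
 *      On the target cpu:
 *
 *              lwkt_acquire(td);
 *              lwkt_schedule(td);
 */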

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
#ifdef SMP
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_gd == mycpu) {
            _lwkt_dequeue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
        }
    }
#else
    _lwkt_dequeue(td);
#endif
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread; LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 *
 * We have to retain the critical section count which uses the high bits
 * of the td_pri field.  The specified priority may also indicate zero or
 * more critical sections by adding TDPRI_CRIT*N.
 *
 * Note that we requeue the thread whether it winds up on a different runq
 * or not.  uio_yield() depends on this and the routine is not normally
 * called with the same priority otherwise.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT(td->td_gd == mycpu);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

/*
 * Set the initial priority for a thread prior to it being scheduled for
 * the first time.  The thread MUST NOT be scheduled before or during
 * this call.  The thread may be assigned to a cpu other than the current
 * cpu.
 *
 * Typically used after a thread has been created with TDF_STOPREQ,
 * and before the thread is initially scheduled.
 */
void
lwkt_setpri_initial(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT((td->td_flags & TDF_RUNQ) == 0);
    td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
}

void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

/*
 * Migrate the current thread to the specified cpu.
 *
 * This is accomplished by descheduling ourselves from the current cpu,
 * moving our thread to the tdallq of the target cpu, IPI messaging the
 * target cpu, and switching out.  TDF_MIGRATING prevents scheduling
 * races while the thread is being migrated.
 *
 * We must be sure to remove ourselves from the current cpu's tsleepq
 * before potentially moving to another queue.  The thread can be on
 * a tsleepq due to a left-over tsleep_interlock().
 */
#ifdef SMP
static void lwkt_setcpu_remote(void *arg);
#endif

void
lwkt_setcpu_self(globaldata_t rgd)
{
#ifdef SMP
    thread_t td = curthread;

    if (td->td_gd != rgd) {
        crit_enter_quick(td);
        if (td->td_flags & TDF_TSLEEPQ)
            tsleep_remove(td);
        td->td_flags |= TDF_MIGRATING;
        lwkt_deschedule_self(td);
        TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
        lwkt_send_ipiq(rgd, (ipifunc1_t)lwkt_setcpu_remote, td);
        lwkt_switch();
        /* we are now on the target cpu */
        TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
        crit_exit_quick(td);
    }
#endif
}

void
lwkt_migratecpu(int cpuid)
{
#ifdef SMP
    globaldata_t rgd;

    rgd = globaldata_find(cpuid);
    lwkt_setcpu_self(rgd);
#endif
}

/*
 * Remote IPI for cpu migration (called while in a critical section so we
 * do not have to enter another one).  The thread has already been moved to
 * our cpu's allq, but we must wait for the thread to be completely switched
 * out on the originating cpu before we schedule it on ours or the stack
 * state may be corrupt.  We clear TDF_MIGRATING after flushing the GD
 * change to main memory.
 *
 * XXX The use of TDF_MIGRATING might not be sufficient to avoid races
 * against wakeups.  It is best if this interface is used only when there
 * are no pending events that might try to schedule the thread.
 */
#ifdef SMP
static void
lwkt_setcpu_remote(void *arg)
{
    thread_t td = arg;
    globaldata_t gd = mycpu;

    while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
#ifdef SMP
        lwkt_process_ipiq();
#endif
        cpu_lfence();
    }
    td->td_gd = gd;
    cpu_sfence();
    td->td_flags &= ~TDF_MIGRATING;
    KKASSERT(td->td_lwp == NULL || (td->td_lwp->lwp_flag & LWP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
}
#endif

struct lwp *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;
    while (td->td_preempted)
        td = td->td_preempted;
    return(td->td_lwp);
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * NOTE! By default new threads are created with the MP lock held.  A
 * thread which does not require the MP lock should release it by calling
 * rel_mplock() at the start of the new thread.
 */
int
lwkt_create(void (*func)(void *), void *arg,
            struct thread **tdp, thread_t template, int tdflags, int cpu,
            const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
                           tdflags);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, lwkt_exit, func, arg);

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    if ((td->td_flags & TDF_STOPREQ) == 0)
        lwkt_schedule(td);
    else
        td->td_flags &= ~TDF_STOPREQ;
    return 0;
}
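
/*
 * Illustrative usage sketch (not part of the original sources): creating
 * a simple kernel thread on the current cpu with lwkt_create().  The
 * handler and thread pointer names are hypothetical; per the NOTE above,
 * a real handler that does not need the MP lock would normally call
 * rel_mplock() as its first action.
 *
 *      static struct thread *example_td;
 *
 *      static void
 *      example_thread(void *arg)
 *      {
 *              for (;;) {
 *                      ... do work, tsleep(), etc ...
 *              }
 *      }
 *
 *      ... during initialization ...
 *      lwkt_create(example_thread, NULL, &example_td, NULL, 0, -1,
 *                  "example");
 */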

/*
 * Destroy an LWKT thread.  Warning! This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;
    thread_t std;
    globaldata_t gd;

    if (td->td_flags & TDF_VERBOSE)
        kprintf("kthread %p %s has exited\n", td, td->td_comm);
    caps_exit(td);

    /*
     * Get us into a critical section to interlock gd_freetd and loop
     * until we can get it freed.
     *
     * We have to cache the current td in gd_freetd because objcache_put()ing
     * it would rip it out from under us while our thread is still active.
     */
    gd = mycpu;
    crit_enter_quick(td);
    while ((std = gd->gd_freetd) != NULL) {
        gd->gd_freetd = NULL;
        objcache_put(thread_cache, std);
    }

    /*
     * Remove thread resources from kernel lists and deschedule us for
     * the last time.
     */
    if (td->td_flags & TDF_TSLEEPQ)
        tsleep_remove(td);
    biosched_done(td);
    dsched_exit_thread(td);
    lwkt_deschedule_self(td);
    lwkt_remove_tdallq(td);
    if (td->td_flags & TDF_ALLOCATED_THREAD)
        gd->gd_freetd = td;
    cpu_thread_exit();
}

void
lwkt_remove_tdallq(thread_t td)
{
    KKASSERT(td->td_gd == mycpu);
    TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
}

void
crit_panic(void)
{
    thread_t td = curthread;
    int lpri = td->td_pri;

    td->td_pri = 0;
    panic("td_pri is/would-go negative! %p %d", td, lpri);
}

#ifdef SMP

/*
 * Called from debugger/panic on cpus which have been stopped.  We must still
 * process the IPIQ while stopped, even if we were stopped while in a critical
 * section (XXX).
 *
 * If we are dumping also try to process any pending interrupts.  This may
 * or may not work depending on the state of the cpu at the point it was
 * stopped.
 */
void
lwkt_smp_stopped(void)
{
    globaldata_t gd = mycpu;

    crit_enter_gd(gd);
    if (dumping) {
        lwkt_process_ipiq();
        splz();
    } else {
        lwkt_process_ipiq();
    }
    crit_exit_gd(gd);
}

#endif