/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.68 2004/07/29 09:02:33 dillon Exp $
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <machine/cpufunc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/lock.h>

#endif

static int untimely_switch = 0;
#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
static __int64_t switch_count = 0;
static __int64_t preempt_hit = 0;
static __int64_t preempt_miss = 0;
static __int64_t preempt_weird = 0;

#ifdef _KERNEL

SYSCTL_INT(_lwkt, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, "");
#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, "");
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");

#endif

/*
 * These helper procedures handle the runq; they can only be called from
 * within a critical section.
 *
 * WARNING! Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueueing and dequeueing only occur on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq);
        /* runqmask is passively cleaned up by the switcher */
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING)) == 0) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags |= TDF_RUNQ;
        TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq);
        gd->gd_runqmask |= 1 << nq;
    }
}
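
/*
 * Each priority level has its own run queue and a corresponding bit in
 * gd_runqmask: a thread at priority nq sits on gd_tdrunq[nq] and sets
 * bit (1 << nq).  lwkt_switch() picks the next thread by taking
 * bsrl(gd_runqmask) (the highest set bit, i.e. the highest runnable
 * priority), and tests of the form 'runqmask > (2 << nq) - 1' check
 * whether anything above priority nq is runnable.
 */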

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!"));
    KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    _lwkt_enqueue(td);
#ifdef _KERNEL
    if (td->td_proc && td->td_proc->p_stat == SSLEEP)
        panic("SCHED SELF PANIC");
#endif
    crit_exit_quick(td);
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td->td_wait == NULL, ("lwkt_deschedule_self(): td_wait not NULL!"));
    _lwkt_dequeue(td);
    crit_exit_quick(td);
}

#ifdef _KERNEL

/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING! Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    int i;

    for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
        TAILQ_INIT(&gd->gd_tdrunq[i]);
    gd->gd_runqmask = 0;
    TAILQ_INIT(&gd->gd_tdallq);
}

#endif /* _KERNEL */

/*
 * Initialize a thread wait structure prior to first use.
 *
 * NOTE! called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_wait_init(lwkt_wait_t w)
{
    lwkt_token_init(&w->wa_token);
    TAILQ_INIT(&w->wa_waitq);
    w->wa_gen = 0;
    w->wa_count = 0;
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 the thread will be created on the current cpu.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu)
{
    void *stack;
    int flags = 0;
    globaldata_t gd = mycpu;

    if (td == NULL) {
        crit_enter_gd(gd);
        if (gd->gd_tdfreecount > 0) {
            --gd->gd_tdfreecount;
            td = TAILQ_FIRST(&gd->gd_tdfreeq);
            KASSERT(td != NULL && (td->td_flags & TDF_RUNNING) == 0,
                ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
            TAILQ_REMOVE(&gd->gd_tdfreeq, td, td_threadq);
            crit_exit_gd(gd);
            flags = td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
        } else {
            crit_exit_gd(gd);
#ifdef _KERNEL
            td = zalloc(thread_zone);
#else
            td = malloc(sizeof(struct thread));
#endif
            td->td_kstack = NULL;
            td->td_kstack_size = 0;
            flags |= TDF_ALLOCATED_THREAD;
        }
    }
    if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
        if (flags & TDF_ALLOCATED_STACK) {
            kmem_free(kernel_map, (vm_offset_t)stack, td->td_kstack_size);
            stack = NULL;
        }
    }
    if (stack == NULL) {
#ifdef _KERNEL
        stack = (void *)kmem_alloc(kernel_map, stksize);
#else
        stack = libcaps_alloc_stack(stksize);
#endif
        flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0)
        lwkt_init_thread(td, stack, stksize, flags, mycpu);
    else
        lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
    return(td);
}
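
/*
 * Example: lwkt_create() below allocates its thread with
 * lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu).  Passing a NULL
 * 'td' allocates a fresh thread structure (reusing an entry from the
 * per-cpu free queue when one is available), and a cpu of -1 selects
 * the current cpu.
 */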

#ifdef _KERNEL

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.  This function may send an IPI message when the
 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the originating
 * cpu.
 *
 * NOTE! we have to be careful in regards to creating threads for other cpus
 * if SMP has not yet been activated.
 */
#ifdef SMP

static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

#endif

void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
                 struct globaldata *gd)
{
    globaldata_t mygd = mycpu;

    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_kstack_size = stksize;
    td->td_flags |= flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
    lwkt_initport(&td->td_msgport, td);
    pmap_init_thread(td);
#ifdef SMP
    /*
     * Normally initializing a thread for a remote cpu requires sending an
     * IPI.  However, the idlethread is setup before the other cpus are
     * activated so we have to treat it as a special case.  XXX manipulation
     * of gd_tdallq requires the BGL.
     */
    if (gd == mygd || td == &gd->gd_idlethread) {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        crit_exit_gd(mygd);
    } else {
        lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
#else
    crit_enter_gd(mygd);
    TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
    crit_exit_gd(mygd);
#endif
}

#endif /* _KERNEL */

void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    vsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
}

void
lwkt_hold(thread_t td)
{
    ++td->td_refs;
}

void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    --td->td_refs;
}

#ifdef _KERNEL

void
lwkt_wait_free(thread_t td)
{
    while (td->td_refs)
        tsleep(td, 0, "tdreap", hz);
}

#endif

void
lwkt_free_thread(thread_t td)
{
    struct globaldata *gd = mycpu;

    KASSERT((td->td_flags & TDF_RUNNING) == 0,
        ("lwkt_free_thread: did not exit! %p", td));

    crit_enter_gd(gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    if (gd->gd_tdfreecount < CACHE_NTHREADS &&
        (td->td_flags & TDF_ALLOCATED_THREAD)
    ) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_HEAD(&gd->gd_tdfreeq, td, td_threadq);
        crit_exit_gd(gd);
    } else {
        crit_exit_gd(gd);
        if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) {
#ifdef _KERNEL
            kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
#else
            libcaps_free_stack(td->td_kstack, td->td_kstack_size);
#endif
            /* gd invalid */
            td->td_kstack = NULL;
            td->td_kstack_size = 0;
        }
        if (td->td_flags & TDF_ALLOCATED_THREAD) {
#ifdef _KERNEL
            zfree(thread_zone, td);
#else
            free(td);
#endif
        }
    }
}
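
/*
 * Thread structures released above are cached on the per-cpu gd_tdfreeq
 * (up to CACHE_NTHREADS entries) and reused by lwkt_alloc_thread();
 * only the overflow has its stack returned to kernel_map and the thread
 * structure returned to the thread zone.
 */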

/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * The MP lock may be out of sync with the thread's td_mpcount.  lwkt_switch()
 * cleans it up.  Note that the td_switch() function cannot do anything that
 * requires the MP lock since the MP lock will have already been setup for
 * the target thread (not the current thread).  It's nice to have a scheduler
 * that does not need the MP lock to work because it allows us to do some
 * really cool high-performance MP lock optimizations.
 */
void
lwkt_switch(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    thread_t ntd;
#ifdef SMP
    int mpheld;
#endif

    /*
     * Switching from within a 'fast' (non thread switched) interrupt is
     * illegal.
     */
    if (gd->gd_intr_nesting_level && panicstr == NULL) {
        panic("lwkt_switch: cannot switch from within a fast interrupt, yet");
    }

    /*
     * Passive release (used to transition from user to kernel mode
     * when we block or switch rather than when we enter the kernel).
     * This function is NOT called if we are switching into a preemption
     * or returning from a preemption.  Typically this causes us to lose
     * our current process designation (if we have one) and become a true
     * LWKT thread, and may also hand the current process designation to
     * another process and schedule its thread.
     */
    if (td->td_release)
        td->td_release(td);

    crit_enter_gd(gd);
    ++switch_count;

#ifdef SMP
    /*
     * td_mpcount cannot be used to determine if we currently hold the
     * MP lock because get_mplock() will increment it prior to attempting
     * to get the lock, and switch out if it can't.  Our ownership of
     * the actual lock will remain stable while we are in a critical section
     * (but, of course, another cpu may own or release the lock so the
     * actual value of mp_lock is not stable).
     */
    mpheld = MP_LOCK_HELD();
#ifdef INVARIANTS
    if (td->td_cscount) {
        printf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
            td);
        if (panic_on_cscount)
            panic("switching while mastering cpusync");
    }
#endif
#endif
    if ((ntd = td->td_preempted) != NULL) {
        /*
         * We had preempted another thread on this cpu, resume the preempted
         * thread.  This occurs transparently, whether the preempted thread
         * was scheduled or not (it may have been preempted after descheduling
         * itself).
         *
         * We have to setup the MP lock for the original thread after backing
         * out the adjustment that was made to curthread when the original
         * was preempted.
         */
        KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
#ifdef SMP
        if (ntd->td_mpcount && mpheld == 0) {
            panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d",
                td, ntd, td->td_mpcount, ntd->td_mpcount);
        }
        if (ntd->td_mpcount) {
            td->td_mpcount -= ntd->td_mpcount;
            KKASSERT(td->td_mpcount >= 0);
        }
#endif
        ntd->td_flags |= TDF_PREEMPT_DONE;

        /*
         * XXX.  The interrupt may have woken a thread up, we need to properly
         * set the reschedule flag if the originally interrupted thread is at
         * a lower priority.
         */
        if (gd->gd_runqmask > (2 << (ntd->td_pri & TDPRI_MASK)) - 1)
            need_lwkt_resched();
        /* YYY release mp lock on switchback if original doesn't need it */
    } else {
        /*
         * Priority queue / round-robin at each priority.  Note that user
         * processes run at a fixed, low priority and the user process
         * scheduler deals with interactions between user processes
         * by scheduling and descheduling them from the LWKT queue as
         * necessary.
         *
         * We have to adjust the MP lock for the target thread.  If we
         * need the MP lock and cannot obtain it we try to locate a
         * thread that does not need the MP lock.  If we cannot, we spin
         * instead of HLT.
         *
         * A similar issue exists for the tokens held by the target thread.
         * If we cannot obtain ownership of the tokens we cannot immediately
         * schedule the thread.
         */

        /*
         * We are switching threads.  If there are any pending requests for
         * tokens we can satisfy all of them here.
         */
#ifdef SMP
        if (gd->gd_tokreqbase)
            lwkt_drain_token_requests();
#endif

        /*
         * If an LWKT reschedule was requested, well that is what we are
         * doing now so clear it.
         */
        clear_lwkt_resched();
again:
        if (gd->gd_runqmask) {
            int nq = bsrl(gd->gd_runqmask);
            if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) {
                gd->gd_runqmask &= ~(1 << nq);
                goto again;
            }
#ifdef SMP
            /*
             * If the target needs the MP lock and we couldn't get it,
             * or if the target is holding tokens and we could not
             * gain ownership of the tokens, continue looking for a
             * thread to schedule and spin instead of HLT if we can't.
             */
            if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) ||
                (ntd->td_toks && lwkt_chktokens(ntd) == 0)
            ) {
                u_int32_t rqmask = gd->gd_runqmask;
                while (rqmask) {
                    TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
                        if (ntd->td_mpcount && !mpheld && !cpu_try_mplock())
                            continue;
                        mpheld = MP_LOCK_HELD();
                        if (ntd->td_toks && !lwkt_chktokens(ntd))
                            continue;
                        break;
                    }
                    if (ntd)
                        break;
                    rqmask &= ~(1 << nq);
                    nq = bsrl(rqmask);
                }
                if (ntd == NULL) {
                    ntd = &gd->gd_idlethread;
                    ntd->td_flags |= TDF_IDLE_NOHLT;
                } else {
                    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
                }
            } else {
                TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
            }
#else
            TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
#endif
        } else {
            /*
             * We have nothing to run but only let the idle loop halt
             * the cpu if there are no pending interrupts.
             */
            ntd = &gd->gd_idlethread;
            if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
                ntd->td_flags |= TDF_IDLE_NOHLT;
        }
    }
    KASSERT(ntd->td_pri >= TDPRI_CRIT,
        ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));

    /*
     * Do the actual switch.  If the new target does not need the MP lock
     * and we are holding it, release the MP lock.  If the new target requires
     * the MP lock we have already acquired it for the target.
     */
#ifdef SMP
    if (ntd->td_mpcount == 0) {
        if (MP_LOCK_HELD())
            cpu_rel_mplock();
    } else {
        ASSERT_MP_LOCK_HELD();
    }
#endif
    if (td != ntd)
        td->td_switch(ntd);
    /* NOTE: current cpu may have changed after switch */
    crit_exit_quick(td);
}

/*
 * Request that the target thread preempt the current thread.  Preemption
 * only works under a specific set of conditions:
 *
 *  - We are not preempting ourselves
 *  - The target thread is owned by the current cpu
 *  - We are not currently being preempted
 *  - The target is not currently being preempted
 *  - We are able to satisfy the target's MP lock requirements (if any).
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critpri is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * XXX at the moment we run the target thread in a critical section during
 * the preemption in order to prevent the target from taking interrupts
 * that *WE* can't.  Preemption is strictly limited to interrupt threads
 * and interrupt-like threads, outside of a critical section, and the
 * preempted source thread will be resumed the instant the target blocks
 * whether or not the source is scheduled (i.e. preemption is supposed to
 * be as transparent as possible).
 *
 * The target thread inherits our MP count (added to its own) for the
 * duration of the preemption in order to preserve the atomicity of the
 * MP lock during the preemption.  Therefore, any preempting targets must be
 * careful in regards to MP assertions.  Note that the MP count may be
 * out of sync with the physical mp_lock, but we do not have to preserve
 * the original ownership of the lock if it was out of sync (that is, we
 * can leave it synchronized on return).
 */
void
lwkt_preempt(thread_t ntd, int critpri)
{
    struct globaldata *gd = mycpu;
    thread_t td;
#ifdef SMP
    int mpheld;
    int savecnt;
#endif

    /*
     * The caller has put us in a critical section.  We can only preempt
     * if the caller of the caller was not in a critical section (basically
     * a local interrupt), as determined by the 'critpri' parameter.
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     *
     * Any tokens held by the target may not be held by thread(s) being
     * preempted.  We take the easy way out and do not preempt if
     * the target is holding tokens.
     *
     * Set need_lwkt_resched() unconditionally for now YYY.
     */
    KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri));

    td = gd->gd_curthread;
    if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) {
        ++preempt_miss;
        return;
    }
    if ((td->td_pri & ~TDPRI_MASK) > critpri) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    if (ntd->td_gd != gd) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif
    /*
     * Take the easy way out and do not preempt if the target is holding
     * one or more tokens.  We could test whether the thread(s) being
     * preempted interlock against the target thread's tokens and whether
     * we can get all the target thread's tokens, but this situation
     * should not occur very often so it's easier to simply not preempt.
     */
    if (ntd->td_toks != NULL) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
        ++preempt_weird;
        need_lwkt_resched();
        return;
    }
    if (ntd->td_preempted) {
        ++preempt_hit;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    /*
     * note: an interrupt might have occurred just as we were transitioning
     * to or from the MP lock.  In this case td_mpcount will be pre-disposed
     * (non-zero) but not actually synchronized with the actual state of the
     * lock.  We can use it to imply an MP lock requirement for the
     * preemption but we cannot use it to test whether we hold the MP lock
     * or not.
     */
    savecnt = td->td_mpcount;
    mpheld = MP_LOCK_HELD();
    ntd->td_mpcount += td->td_mpcount;
    if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) {
        ntd->td_mpcount -= td->td_mpcount;
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif

    /*
     * Since we are able to preempt the current thread, there is no need to
     * call need_lwkt_resched().
     */
    ++preempt_hit;
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    td->td_switch(ntd);
    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
#ifdef SMP
    KKASSERT(savecnt == td->td_mpcount);
    mpheld = MP_LOCK_HELD();
    if (mpheld && td->td_mpcount == 0)
        cpu_rel_mplock();
    else if (mpheld == 0 && td->td_mpcount)
        panic("lwkt_preempt(): MP lock was not held through");
#endif
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}
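
/*
 * Note: lwkt_preempt() is typically reached from lwkt_schedule() via the
 * target thread's td_preemptable callback (see _lwkt_schedule_post()
 * below), with critpri set to the critical section priority that
 * lwkt_schedule() itself holds (TDPRI_CRIT).
 */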

/*
 * Yield our thread while higher priority threads are pending.  This is
 * typically called when we leave a critical section but it can be safely
 * called while we are in a critical section.
 *
 * This function will not generally yield to equal priority threads but it
 * can occur as a side effect.  Note that lwkt_switch() is called from
 * inside the critical section to prevent its own crit_exit() from reentering
 * lwkt_yield_quick().
 *
 * gd_reqflags indicates that *something* changed, e.g. an interrupt or softint
 * came along but was blocked and made pending.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield_quick(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    /*
     * gd_reqflags is cleared in splz if the cpl is 0.  If we were to clear
     * it with a non-zero cpl then we might not wind up calling splz after
     * a task switch when the critical section is exited even though the
     * new task could accept the interrupt.
     *
     * XXX from crit_exit() only called after last crit section is released.
     * If called directly will run splz() even if in a critical section.
     *
     * td_nest_count prevents deep nesting via splz() or doreti().  Note that
     * except for this special case, we MUST call splz() here to handle any
     * pending ints, particularly after we switch, or we might accidentally
     * halt the cpu with interrupts pending.
     */
    if (gd->gd_reqflags && td->td_nest_count < 2)
        splz();

    /*
     * YYY enabling will cause wakeup() to task-switch, which really
     * confused the old 4.x code.  This is a good way to simulate
     * preemption and MP without actually doing preemption or MP, because a
     * lot of code assumes that wakeup() does not block.
     */
    if (untimely_switch && td->td_nest_count == 0 &&
        gd->gd_intr_nesting_level == 0
    ) {
        crit_enter_quick(td);
        /*
         * YYY temporary hacks until we disassociate the userland scheduler
         * from the LWKT scheduler.
         */
        if (td->td_flags & TDF_RUNQ) {
            lwkt_switch();              /* will not reenter yield function */
        } else {
            lwkt_schedule_self(td);     /* make sure we are scheduled */
            lwkt_switch();              /* will not reenter yield function */
            lwkt_deschedule_self(td);   /* make sure we are descheduled */
        }
        crit_exit_noyield(td);
    }
}

/*
 * This implements a normal yield which, unlike _quick, will yield to equal
 * priority threads as well.  Note that gd_reqflags tests will be handled by
 * the crit_exit() call in lwkt_switch().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    lwkt_schedule_self(curthread);
    lwkt_switch();
}

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * We have a little helper inline function which does additional work after
 * the thread has been enqueued, including dealing with preemption and
 * setting need_lwkt_resched() (which prevents the kernel from returning
 * to userland until it has processed higher priority threads).
 */
static __inline
void
_lwkt_schedule_post(globaldata_t gd, thread_t ntd, int cpri)
{
    if (ntd->td_preemptable) {
        ntd->td_preemptable(ntd, cpri); /* YYY +token */
    } else if ((ntd->td_flags & TDF_NORESCHED) == 0 &&
        (ntd->td_pri & TDPRI_MASK) > (gd->gd_curthread->td_pri & TDPRI_MASK)
    ) {
        need_lwkt_resched();
    }
}

void
lwkt_schedule(thread_t td)
{
    globaldata_t mygd = mycpu;

#ifdef INVARIANTS
    KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
    if ((td->td_flags & TDF_PREEMPT_LOCK) == 0 && td->td_proc
        && td->td_proc->p_stat == SSLEEP
    ) {
        printf("PANIC schedule curtd = %p (%d %d) target %p (%d %d)\n",
            curthread,
            curthread->td_proc ? curthread->td_proc->p_pid : -1,
            curthread->td_proc ? curthread->td_proc->p_stat : -1,
            td,
            td->td_proc ? td->td_proc->p_pid : -1,
            td->td_proc ? td->td_proc->p_stat : -1
        );
        panic("SCHED PANIC");
    }
#endif
    crit_enter_gd(mygd);
    if (td == mygd->gd_curthread) {
        _lwkt_enqueue(td);
    } else {
        lwkt_wait_t w;

        /*
         * If the thread is on a wait list we have to send our scheduling
         * request to the owner of the wait structure.  Otherwise we send
         * the scheduling request to the cpu owning the thread.  Races
         * are ok, the target will forward the message as necessary (the
         * message may chase the thread around before it finally gets
         * acted upon).
         *
         * (remember, wait structures use stable storage)
         *
         * NOTE: tokens no longer enter a critical section, so we only need
         * to account for the crit_enter() above when calling
         * _lwkt_schedule_post().
         */
        if ((w = td->td_wait) != NULL) {
            lwkt_tokref wref;

            if (lwkt_trytoken(&wref, &w->wa_token)) {
                TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
                --w->wa_count;
                td->td_wait = NULL;
#ifdef SMP
                if (td->td_gd == mygd) {
                    _lwkt_enqueue(td);
                    _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
                } else {
                    lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
                }
#else
                _lwkt_enqueue(td);
                _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
#endif
                lwkt_reltoken(&wref);
            } else {
                lwkt_send_ipiq(w->wa_token.t_cpu, (ipifunc_t)lwkt_schedule, td);
            }
        } else {
            /*
             * If the wait structure is NULL and we own the thread, there
             * is no race (since we are in a critical section).  If we
             * do not own the thread there might be a race but the
             * target cpu will deal with it.
             */
#ifdef SMP
            if (td->td_gd == mygd) {
                _lwkt_enqueue(td);
                _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
            } else {
                lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
            }
#else
            _lwkt_enqueue(td);
            _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
#endif
        }
    }
    crit_exit_gd(mygd);
}

/*
 * Managed acquisition.  This code assumes that the MP lock is held for
 * the tdallq operation and that the thread has been descheduled from its
 * original cpu.  We also have to wait for the thread to be entirely switched
 * out on its original cpu (this is usually fast enough that we never loop)
 * since the LWKT system does not have to hold the MP lock while switching
 * and the target may have released it before switching.
 */
void
lwkt_acquire(thread_t td)
{
    globaldata_t gd;
    globaldata_t mygd;

    gd = td->td_gd;
    mygd = mycpu;
    KKASSERT((td->td_flags & TDF_RUNQ) == 0);
    while (td->td_flags & TDF_RUNNING)  /* XXX spin */
        cpu_mb1();
    if (gd != mygd) {
        crit_enter_gd(mygd);
        TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);        /* protected by BGL */
        td->td_gd = mygd;
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq); /* protected by BGL */
        crit_exit_gd(mygd);
    }
}

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_gd == mycpu) {
            _lwkt_dequeue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_deschedule, td);
        }
    }
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread; LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 *
 * We have to retain the critical section count which uses the high bits
 * of the td_pri field.  The specified priority may also indicate zero or
 * more critical sections by adding TDPRI_CRIT*N.
 *
 * Note that we requeue the thread whether it winds up on a different runq
 * or not.  uio_yield() depends on this and the routine is not normally
 * called with the same priority otherwise.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT(td->td_gd == mycpu);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

/*
 * Determine if there is a runnable thread at a higher priority than
 * the current thread.  lwkt_setpri() does not check this automatically.
 * Return 1 if there is, 0 if there isn't.
 *
 * Example: if bit 31 of runqmask is set and the current thread is priority
 * 30, then we wind up checking the mask: 0x80000000 against 0x7fffffff.
 *
 * If nq reaches 31 the shift operation will overflow to 0 and we will wind
 * up comparing against 0xffffffff, a comparison that will always be false.
 */
int
lwkt_checkpri_self(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    int nq = td->td_pri & TDPRI_MASK;

    while (gd->gd_runqmask > (__uint32_t)(2 << nq) - 1) {
        if (TAILQ_FIRST(&gd->gd_tdrunq[nq + 1]))
            return(1);
        ++nq;
    }
    return(0);
}

/*
 * Migrate the current thread to the specified cpu.  The BGL must be held
 * (for the gd_tdallq manipulation XXX).  This is accomplished by
 * descheduling ourselves from the current cpu, moving our thread to the
 * tdallq of the target cpu, IPI messaging the target cpu, and switching out.
 * TDF_MIGRATING prevents scheduling races while the thread is being migrated.
 */
#ifdef SMP
static void lwkt_setcpu_remote(void *arg);
#endif

void
lwkt_setcpu_self(globaldata_t rgd)
{
#ifdef SMP
    thread_t td = curthread;

    if (td->td_gd != rgd) {
        crit_enter_quick(td);
        td->td_flags |= TDF_MIGRATING;
        lwkt_deschedule_self(td);
        TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq); /* protected by BGL */
        TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);  /* protected by BGL */
        lwkt_send_ipiq(rgd, (ipifunc_t)lwkt_setcpu_remote, td);
        lwkt_switch();
        /* we are now on the target cpu */
        crit_exit_quick(td);
    }
#endif
}
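
/*
 * Example: a thread binds itself to cpu 'n' with
 * lwkt_setcpu_self(globaldata_find(n)); globaldata_find() is the same
 * cpu lookup used by lwkt_alloc_thread() above.
 */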

/*
 * Remote IPI for cpu migration (called while in a critical section so we
 * do not have to enter another one).  The thread has already been moved to
 * our cpu's allq, but we must wait for the thread to be completely switched
 * out on the originating cpu before we schedule it on ours or the stack
 * state may be corrupt.  We clear TDF_MIGRATING after flushing the GD
 * change to main memory.
 *
 * XXX The use of TDF_MIGRATING might not be sufficient to avoid races
 * against wakeups.  It is best if this interface is used only when there
 * are no pending events that might try to schedule the thread.
 */
#ifdef SMP
static void
lwkt_setcpu_remote(void *arg)
{
    thread_t td = arg;
    globaldata_t gd = mycpu;

    while (td->td_flags & TDF_RUNNING)
        cpu_mb1();
    td->td_gd = gd;
    cpu_mb2();
    td->td_flags &= ~TDF_MIGRATING;
    _lwkt_enqueue(td);
}
#endif

struct proc *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;
    while (td->td_preempted)
        td = td->td_preempted;
    return(td->td_proc);
}

/*
 * Block on the specified wait queue until signaled.  A generation number
 * must be supplied to interlock the wait queue.  The function will
 * return immediately if the generation number does not match the wait
 * structure's generation number.
 */
void
lwkt_block(lwkt_wait_t w, const char *wmesg, int *gen)
{
    thread_t td = curthread;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    crit_enter();
    if (w->wa_gen == *gen) {
        _lwkt_dequeue(td);
        TAILQ_INSERT_TAIL(&w->wa_waitq, td, td_threadq);
        ++w->wa_count;
        td->td_wait = w;
        td->td_wmesg = wmesg;
again:
        lwkt_switch();
        if (td->td_wmesg != NULL) {
            _lwkt_dequeue(td);
            goto again;
        }
    }
    crit_exit();
    *gen = w->wa_gen;
    lwkt_reltoken(&ilock);
}

/*
 * Signal a wait queue.  We gain ownership of the wait queue in order to
 * signal it.  Once a thread is removed from the wait queue we have to
 * deal with the cpu owning the thread.
 *
 * Note: alternatively we could message the target cpu owning the wait
 * queue.  YYY implement as sysctl.
 */
void
lwkt_signal(lwkt_wait_t w, int count)
{
    thread_t td;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    ++w->wa_gen;
    crit_enter();
    if (count < 0)
        count = w->wa_count;
    while ((td = TAILQ_FIRST(&w->wa_waitq)) != NULL && count) {
        --count;
        --w->wa_count;
        TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
        td->td_wait = NULL;
        td->td_wmesg = NULL;
        if (td->td_gd == mycpu) {
            _lwkt_enqueue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
        }
    }
    crit_exit();
    lwkt_reltoken(&ilock);
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * NOTE! By default new threads are created with the MP lock held.  A
 * thread which does not require the MP lock should release it by calling
 * rel_mplock() at the start of the new thread.
 */
int
lwkt_create(void (*func)(void *), void *arg,
    struct thread **tdp, thread_t template, int tdflags, int cpu,
    const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, lwkt_exit, func, arg);
    td->td_flags |= TDF_VERBOSE | tdflags;
#ifdef SMP
    td->td_mpcount = 1;
#endif

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    if ((td->td_flags & TDF_STOPREQ) == 0)
        lwkt_schedule(td);
    else
        td->td_flags &= ~TDF_STOPREQ;
    return 0;
}
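
/*
 * Example (illustrative only, hypothetical names): create a kernel thread
 * on the current cpu that starts running immediately:
 *
 *      lwkt_create(my_func, my_arg, &my_td, NULL, 0, -1, "mythread %d", 0);
 *
 * A cpu of -1 selects the current cpu and a NULL template allocates a
 * fresh thread and stack via lwkt_alloc_thread().
 */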

/*
 * kthread_* is specific to the kernel and is not needed by userland.
 */
#ifdef _KERNEL

/*
 * Destroy an LWKT thread.  Warning! This function is not called when
 * a process exits; cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;
    globaldata_t gd;

    if (td->td_flags & TDF_VERBOSE)
        printf("kthread %p %s has exited\n", td, td->td_comm);
    caps_exit(td);
    crit_enter_quick(td);
    lwkt_deschedule_self(td);
    gd = mycpu;
    KKASSERT(gd == td->td_gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    if (td->td_flags & TDF_ALLOCATED_THREAD) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_TAIL(&gd->gd_tdfreeq, td, td_threadq);
    }
    cpu_thread_exit();
}

#endif /* _KERNEL */

void
crit_panic(void)
{
    thread_t td = curthread;
    int lpri = td->td_pri;

    td->td_pri = 0;
    panic("td_pri is/would-go negative! %p %d", td, lpri);
}