/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.70 2004/10/13 18:42:34 eirikn Exp $
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
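 *
 * For example, lwkt_schedule() on a thread owned by another cpu does not
 * touch that cpu's run queue directly; it simply lwkt_send_ipiq()s the
 * request to the owning cpu, which then performs the enqueue locally.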
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <machine/cpufunc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/lock.h>
#include <machine/atomic.h>
#include <machine/cpu.h>

#endif

static int untimely_switch = 0;
#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
static __int64_t switch_count = 0;
static __int64_t preempt_hit = 0;
static __int64_t preempt_miss = 0;
static __int64_t preempt_weird = 0;

#ifdef _KERNEL

SYSCTL_INT(_lwkt, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, "");
#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, "");
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");

#endif

/*
 * These helper procedures handle the runq.  They can only be called from
 * within a critical section.
 *
 * WARNING!  Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueueing and dequeueing only occur on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq);
        /* runqmask is passively cleaned up by the switcher */
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING)) == 0) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags |= TDF_RUNQ;
        TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq);
        gd->gd_runqmask |= 1 << nq;
    }
}

/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * function.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!"));
    KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    _lwkt_enqueue(td);
#ifdef _KERNEL
    if (td->td_proc && td->td_proc->p_stat == SSLEEP)
        panic("SCHED SELF PANIC");
#endif
    crit_exit_quick(td);
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(thread_t td)
{
    crit_enter_quick(td);
    KASSERT(td->td_wait == NULL, ("lwkt_deschedule_self(): td_wait not NULL!"));
    _lwkt_dequeue(td);
    crit_exit_quick(td);
}

#ifdef _KERNEL

/*
 * LWKTs operate on a per-cpu basis.
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    int i;

    for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
        TAILQ_INIT(&gd->gd_tdrunq[i]);
    gd->gd_runqmask = 0;
    TAILQ_INIT(&gd->gd_tdallq);
}

#endif /* _KERNEL */

/*
 * Initialize a thread wait structure prior to first use.
 *
 * NOTE!  Called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_wait_init(lwkt_wait_t w)
{
    lwkt_token_init(&w->wa_token);
    TAILQ_INIT(&w->wa_waitq);
    w->wa_gen = 0;
    w->wa_count = 0;
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 the thread will be created on the current cpu.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu)
{
    void *stack;
    int flags = 0;
    globaldata_t gd = mycpu;

    if (td == NULL) {
        crit_enter_gd(gd);
        if (gd->gd_tdfreecount > 0) {
            --gd->gd_tdfreecount;
            td = TAILQ_FIRST(&gd->gd_tdfreeq);
            KASSERT(td != NULL && (td->td_flags & TDF_RUNNING) == 0,
                ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
            TAILQ_REMOVE(&gd->gd_tdfreeq, td, td_threadq);
            crit_exit_gd(gd);
            flags = td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
        } else {
            crit_exit_gd(gd);
#ifdef _KERNEL
            td = zalloc(thread_zone);
#else
            td = malloc(sizeof(struct thread));
#endif
            td->td_kstack = NULL;
            td->td_kstack_size = 0;
            flags |= TDF_ALLOCATED_THREAD;
        }
    }
    if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
        if (flags & TDF_ALLOCATED_STACK) {
#ifdef _KERNEL
            kmem_free(kernel_map, (vm_offset_t)stack, td->td_kstack_size);
#else
            libcaps_free_stack(stack, td->td_kstack_size);
#endif
            stack = NULL;
        }
    }
    if (stack == NULL) {
#ifdef _KERNEL
        stack = (void *)kmem_alloc(kernel_map, stksize);
#else
        stack = libcaps_alloc_stack(stksize);
#endif
        flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0)
        lwkt_init_thread(td, stack, stksize, flags, mycpu);
    else
        lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
    return(td);
}

#ifdef _KERNEL

/*
 * Initialize a preexisting thread structure.
 * This function is used by lwkt_alloc_thread() and also used to initialize
 * the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.  This function may send an IPI message when the
 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the originating
 * cpu.
 *
 * NOTE!  We have to be careful in regards to creating threads for other cpus
 * if SMP has not yet been activated.
 */
#ifdef SMP

static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

#endif

void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
                 struct globaldata *gd)
{
    globaldata_t mygd = mycpu;

    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_kstack_size = stksize;
    td->td_flags |= flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
    lwkt_initport(&td->td_msgport, td);
    pmap_init_thread(td);
#ifdef SMP
    /*
     * Normally initializing a thread for a remote cpu requires sending an
     * IPI.  However, the idlethread is set up before the other cpus are
     * activated so we have to treat it as a special case.  XXX manipulation
     * of gd_tdallq requires the BGL.
     */
    if (gd == mygd || td == &gd->gd_idlethread) {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        crit_exit_gd(mygd);
    } else {
        lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
#else
    crit_enter_gd(mygd);
    TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
    crit_exit_gd(mygd);
#endif
}

#endif /* _KERNEL */

void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    vsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
}

void
lwkt_hold(thread_t td)
{
    ++td->td_refs;
}

void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    --td->td_refs;
}

#ifdef _KERNEL

void
lwkt_wait_free(thread_t td)
{
    while (td->td_refs)
        tsleep(td, 0, "tdreap", hz);
}

#endif

void
lwkt_free_thread(thread_t td)
{
    struct globaldata *gd = mycpu;

    KASSERT((td->td_flags & TDF_RUNNING) == 0,
        ("lwkt_free_thread: did not exit! %p", td));

    crit_enter_gd(gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    if (gd->gd_tdfreecount < CACHE_NTHREADS &&
        (td->td_flags & TDF_ALLOCATED_THREAD)
    ) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_HEAD(&gd->gd_tdfreeq, td, td_threadq);
        crit_exit_gd(gd);
    } else {
        crit_exit_gd(gd);
        if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) {
#ifdef _KERNEL
            kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
#else
            libcaps_free_stack(td->td_kstack, td->td_kstack_size);
#endif
            /* gd invalid */
            td->td_kstack = NULL;
            td->td_kstack_size = 0;
        }
        if (td->td_flags & TDF_ALLOCATED_THREAD) {
#ifdef _KERNEL
            zfree(thread_zone, td);
#else
            free(td);
#endif
        }
    }
}

/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.
 * Switching must occur within a critical section to avoid races with the
 * scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * The MP lock may be out of sync with the thread's td_mpcount.  lwkt_switch()
 * cleans it up.  Note that the td_switch() function cannot do anything that
 * requires the MP lock since the MP lock will have already been set up for
 * the target thread (not the current thread).  It's nice to have a scheduler
 * that does not need the MP lock to work because it allows us to do some
 * really cool high-performance MP lock optimizations.
 */

void
lwkt_switch(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    thread_t ntd;
#ifdef SMP
    int mpheld;
#endif

    /*
     * Switching from within a 'fast' (non thread switched) interrupt is
     * illegal.
     */
    if (gd->gd_intr_nesting_level && panicstr == NULL) {
        panic("lwkt_switch: cannot switch from within a fast interrupt, yet");
    }

    /*
     * Passive release (used to transition from user to kernel mode
     * when we block or switch rather than when we enter the kernel).
     * This function is NOT called if we are switching into a preemption
     * or returning from a preemption.  Typically this causes us to lose
     * our current process designation (if we have one) and become a true
     * LWKT thread, and may also hand the current process designation to
     * another process and schedule its thread.
     */
    if (td->td_release)
        td->td_release(td);

    crit_enter_gd(gd);
    ++switch_count;

#ifdef SMP
    /*
     * td_mpcount cannot be used to determine if we currently hold the
     * MP lock because get_mplock() will increment it prior to attempting
     * to get the lock, and switch out if it can't.  Our ownership of
     * the actual lock will remain stable while we are in a critical section
     * (but, of course, another cpu may own or release the lock so the
     * actual value of mp_lock is not stable).
     */
    mpheld = MP_LOCK_HELD();
#ifdef INVARIANTS
    if (td->td_cscount) {
        printf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
            td);
        if (panic_on_cscount)
            panic("switching while mastering cpusync");
    }
#endif
#endif
    if ((ntd = td->td_preempted) != NULL) {
        /*
         * We had preempted another thread on this cpu, resume the preempted
         * thread.  This occurs transparently, whether the preempted thread
         * was scheduled or not (it may have been preempted after descheduling
         * itself).
         *
         * We have to set up the MP lock for the original thread after backing
         * out the adjustment that was made to curthread when the original
         * was preempted.
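         *
         * (lwkt_preempt() added the original thread's td_mpcount to the
         * preempting thread when the preemption occurred, so backing the
         * adjustment out is simply td->td_mpcount -= ntd->td_mpcount)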
         */
        KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
#ifdef SMP
        if (ntd->td_mpcount && mpheld == 0) {
            panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d",
                td, ntd, td->td_mpcount, ntd->td_mpcount);
        }
        if (ntd->td_mpcount) {
            td->td_mpcount -= ntd->td_mpcount;
            KKASSERT(td->td_mpcount >= 0);
        }
#endif
        ntd->td_flags |= TDF_PREEMPT_DONE;

        /*
         * XXX.  The interrupt may have woken a thread up; we need to properly
         * set the reschedule flag if the originally interrupted thread is at
         * a lower priority.
         */
        if (gd->gd_runqmask > (2 << (ntd->td_pri & TDPRI_MASK)) - 1)
            need_lwkt_resched();
        /* YYY release mp lock on switchback if original doesn't need it */
    } else {
        /*
         * Priority queue / round-robin at each priority.  Note that user
         * processes run at a fixed, low priority and the user process
         * scheduler deals with interactions between user processes
         * by scheduling and descheduling them from the LWKT queue as
         * necessary.
         *
         * We have to adjust the MP lock for the target thread.  If we
         * need the MP lock and cannot obtain it we try to locate a
         * thread that does not need the MP lock.  If we cannot, we spin
         * instead of HLT.
         *
         * A similar issue exists for the tokens held by the target thread.
         * If we cannot obtain ownership of the tokens we cannot immediately
         * schedule the thread.
         */

        /*
         * We are switching threads.  If there are any pending requests for
         * tokens we can satisfy all of them here.
         */
#ifdef SMP
        if (gd->gd_tokreqbase)
            lwkt_drain_token_requests();
#endif

        /*
         * If an LWKT reschedule was requested, well that is what we are
         * doing now so clear it.
         */
        clear_lwkt_resched();
again:
        if (gd->gd_runqmask) {
            int nq = bsrl(gd->gd_runqmask);
            if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) {
                gd->gd_runqmask &= ~(1 << nq);
                goto again;
            }
#ifdef SMP
            /*
             * If the target needs the MP lock and we couldn't get it,
             * or if the target is holding tokens and we could not
             * gain ownership of the tokens, continue looking for a
             * thread to schedule and spin instead of HLT if we can't.
             */
            if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) ||
                (ntd->td_toks && lwkt_chktokens(ntd) == 0)
            ) {
                u_int32_t rqmask = gd->gd_runqmask;
                while (rqmask) {
                    TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
                        if (ntd->td_mpcount && !mpheld && !cpu_try_mplock())
                            continue;
                        mpheld = MP_LOCK_HELD();
                        if (ntd->td_toks && !lwkt_chktokens(ntd))
                            continue;
                        break;
                    }
                    if (ntd)
                        break;
                    rqmask &= ~(1 << nq);
                    nq = bsrl(rqmask);
                }
                if (ntd == NULL) {
                    ntd = &gd->gd_idlethread;
                    ntd->td_flags |= TDF_IDLE_NOHLT;
                } else {
                    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
                }
            } else {
                TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
            }
#else
            TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
#endif
        } else {
            /*
             * We have nothing to run but only let the idle loop halt
             * the cpu if there are no pending interrupts.
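             *
             * (TDF_IDLE_NOHLT asks the idle loop to spin for a pass rather
             * than HLT, so we do not halt the cpu while an interrupt is
             * still pending)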
             */
            ntd = &gd->gd_idlethread;
            if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
                ntd->td_flags |= TDF_IDLE_NOHLT;
        }
    }
    KASSERT(ntd->td_pri >= TDPRI_CRIT,
        ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));

    /*
     * Do the actual switch.  If the new target does not need the MP lock
     * and we are holding it, release the MP lock.  If the new target requires
     * the MP lock we have already acquired it for the target.
     */
#ifdef SMP
    if (ntd->td_mpcount == 0) {
        if (MP_LOCK_HELD())
            cpu_rel_mplock();
    } else {
        ASSERT_MP_LOCK_HELD();
    }
#endif
    if (td != ntd)
        td->td_switch(ntd);
    /* NOTE: current cpu may have changed after switch */
    crit_exit_quick(td);
}

/*
 * Request that the target thread preempt the current thread.  Preemption
 * only works under a specific set of conditions:
 *
 *      - We are not preempting ourselves
 *      - The target thread is owned by the current cpu
 *      - We are not currently being preempted
 *      - The target is not currently being preempted
 *      - We are able to satisfy the target's MP lock requirements (if any).
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critpri is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * XXX at the moment we run the target thread in a critical section during
 * the preemption in order to prevent the target from taking interrupts
 * that *WE* can't.  Preemption is strictly limited to interrupt threads
 * and interrupt-like threads, outside of a critical section, and the
 * preempted source thread will be resumed the instant the target blocks
 * whether or not the source is scheduled (i.e. preemption is supposed to
 * be as transparent as possible).
 *
 * The target thread inherits our MP count (added to its own) for the
 * duration of the preemption in order to preserve the atomicity of the
 * MP lock during the preemption.  Therefore, any preempting targets must be
 * careful in regards to MP assertions.  Note that the MP count may be
 * out of sync with the physical mp_lock, but we do not have to preserve
 * the original ownership of the lock if it was out of sync (that is, we
 * can leave it synchronized on return).
 */
void
lwkt_preempt(thread_t ntd, int critpri)
{
    struct globaldata *gd = mycpu;
    thread_t td;
#ifdef SMP
    int mpheld;
    int savecnt;
#endif

    /*
     * The caller has put us in a critical section.  We can only preempt
     * if the caller of the caller was not in a critical section (basically
     * a local interrupt), as determined by the 'critpri' parameter.
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     *
     * Any tokens held by the target may not be held by thread(s) being
     * preempted.  We take the easy way out and do not preempt if
     * the target is holding tokens.
     *
     * Set need_lwkt_resched() unconditionally for now YYY.
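     *
     * (for example, lwkt_schedule() passes TDPRI_CRIT as critpri through the
     * td_preemptable callback, accounting for its own crit_enter(); any
     * additional critical section depth held by the current thread causes
     * the preemption attempt below to be refused)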
     */
    KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri));

    td = gd->gd_curthread;
    if ((ntd->td_pri & TDPRI_MASK) <= (td->td_pri & TDPRI_MASK)) {
        ++preempt_miss;
        return;
    }
    if ((td->td_pri & ~TDPRI_MASK) > critpri) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    if (ntd->td_gd != gd) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif
    /*
     * Take the easy way out and do not preempt if the target is holding
     * one or more tokens.  We could test whether the thread(s) being
     * preempted interlock against the target thread's tokens and whether
     * we can get all the target thread's tokens, but this situation
     * should not occur very often so it's easier to simply not preempt.
     */
    if (ntd->td_toks != NULL) {
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
        ++preempt_weird;
        need_lwkt_resched();
        return;
    }
    if (ntd->td_preempted) {
        ++preempt_hit;
        need_lwkt_resched();
        return;
    }
#ifdef SMP
    /*
     * NOTE: an interrupt might have occurred just as we were transitioning
     * to or from the MP lock.  In this case td_mpcount will be pre-disposed
     * (non-zero) but not actually synchronized with the actual state of the
     * lock.  We can use it to imply an MP lock requirement for the
     * preemption but we cannot use it to test whether we hold the MP lock
     * or not.
     */
    savecnt = td->td_mpcount;
    mpheld = MP_LOCK_HELD();
    ntd->td_mpcount += td->td_mpcount;
    if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) {
        ntd->td_mpcount -= td->td_mpcount;
        ++preempt_miss;
        need_lwkt_resched();
        return;
    }
#endif

    /*
     * Since we are able to preempt the current thread, there is no need to
     * call need_lwkt_resched().
     */
    ++preempt_hit;
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    td->td_switch(ntd);
    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
#ifdef SMP
    KKASSERT(savecnt == td->td_mpcount);
    mpheld = MP_LOCK_HELD();
    if (mpheld && td->td_mpcount == 0)
        cpu_rel_mplock();
    else if (mpheld == 0 && td->td_mpcount)
        panic("lwkt_preempt(): MP lock was not held through");
#endif
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}

/*
 * Yield our thread while higher priority threads are pending.  This is
 * typically called when we leave a critical section but it can be safely
 * called while we are in a critical section.
 *
 * This function will not generally yield to equal priority threads but it
 * can occur as a side effect.  Note that lwkt_switch() is called from
 * inside the critical section to prevent its own crit_exit() from reentering
 * lwkt_yield_quick().
 *
 * gd_reqflags indicates that *something* changed, e.g. an interrupt or softint
 * came along but was blocked and made pending.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield_quick(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    /*
     * gd_reqflags is cleared in splz if the cpl is 0.  If we were to clear
     * it with a non-zero cpl then we might not wind up calling splz after
     * a task switch when the critical section is exited even though the
     * new task could accept the interrupt.
     *
     * XXX from crit_exit() only called after last crit section is released.
     * If called directly will run splz() even if in a critical section.
     *
     * td_nest_count prevents deep nesting via splz() or doreti().  Note that
     * except for this special case, we MUST call splz() here to handle any
     * pending ints, particularly after we switch, or we might accidentally
     * halt the cpu with interrupts pending.
     */
    if (gd->gd_reqflags && td->td_nest_count < 2)
        splz();

    /*
     * YYY enabling will cause wakeup() to task-switch, which really
     * confused the old 4.x code.  This is a good way to simulate
     * preemption and MP without actually doing preemption or MP, because a
     * lot of code assumes that wakeup() does not block.
     */
    if (untimely_switch && td->td_nest_count == 0 &&
        gd->gd_intr_nesting_level == 0
    ) {
        crit_enter_quick(td);
        /*
         * YYY temporary hacks until we disassociate the userland scheduler
         * from the LWKT scheduler.
         */
        if (td->td_flags & TDF_RUNQ) {
            lwkt_switch();              /* will not reenter yield function */
        } else {
            lwkt_schedule_self(td);     /* make sure we are scheduled */
            lwkt_switch();              /* will not reenter yield function */
            lwkt_deschedule_self(td);   /* make sure we are descheduled */
        }
        crit_exit_noyield(td);
    }
}

/*
 * This implements a normal yield which, unlike _quick, will yield to equal
 * priority threads as well.  Note that gd_reqflags tests will be handled by
 * the crit_exit() call in lwkt_switch().
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    lwkt_schedule_self(curthread);
    lwkt_switch();
}

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * We have a little helper inline function which does additional work after
 * the thread has been enqueued, including dealing with preemption and
 * setting need_lwkt_resched() (which prevents the kernel from returning
 * to userland until it has processed higher priority threads).
 */
static __inline
void
_lwkt_schedule_post(globaldata_t gd, thread_t ntd, int cpri)
{
    if (ntd->td_preemptable) {
        ntd->td_preemptable(ntd, cpri);         /* YYY +token */
    } else if ((ntd->td_flags & TDF_NORESCHED) == 0 &&
        (ntd->td_pri & TDPRI_MASK) > (gd->gd_curthread->td_pri & TDPRI_MASK)
    ) {
        need_lwkt_resched();
    }
}

void
lwkt_schedule(thread_t td)
{
    globaldata_t mygd = mycpu;

#ifdef INVARIANTS
    KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
    if ((td->td_flags & TDF_PREEMPT_LOCK) == 0 && td->td_proc
        && td->td_proc->p_stat == SSLEEP
    ) {
        printf("PANIC schedule curtd = %p (%d %d) target %p (%d %d)\n",
            curthread,
            curthread->td_proc ? curthread->td_proc->p_pid : -1,
            curthread->td_proc ? curthread->td_proc->p_stat : -1,
            td,
            td->td_proc ? td->td_proc->p_pid : -1,
            td->td_proc ? td->td_proc->p_stat : -1
        );
        panic("SCHED PANIC");
    }
#endif
    crit_enter_gd(mygd);
    if (td == mygd->gd_curthread) {
        _lwkt_enqueue(td);
    } else {
        lwkt_wait_t w;

        /*
         * If the thread is on a wait list we have to send our scheduling
         * request to the owner of the wait structure.  Otherwise we send
         * the scheduling request to the cpu owning the thread.
         * Races are ok, the target will forward the message as necessary (the
         * message may chase the thread around before it finally gets
         * acted upon).
         *
         * (remember, wait structures use stable storage)
         *
         * NOTE: tokens no longer enter a critical section, so we only need
         * to account for the crit_enter() above when calling
         * _lwkt_schedule_post().
         */
        if ((w = td->td_wait) != NULL) {
            lwkt_tokref wref;

            if (lwkt_trytoken(&wref, &w->wa_token)) {
                TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
                --w->wa_count;
                td->td_wait = NULL;
#ifdef SMP
                if (td->td_gd == mygd) {
                    _lwkt_enqueue(td);
                    _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
                } else {
                    lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
                }
#else
                _lwkt_enqueue(td);
                _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
#endif
                lwkt_reltoken(&wref);
            } else {
                lwkt_send_ipiq(w->wa_token.t_cpu, (ipifunc_t)lwkt_schedule, td);
            }
        } else {
            /*
             * If the wait structure is NULL and we own the thread, there
             * is no race (since we are in a critical section).  If we
             * do not own the thread there might be a race but the
             * target cpu will deal with it.
             */
#ifdef SMP
            if (td->td_gd == mygd) {
                _lwkt_enqueue(td);
                _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
            } else {
                lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
            }
#else
            _lwkt_enqueue(td);
            _lwkt_schedule_post(mygd, td, TDPRI_CRIT);
#endif
        }
    }
    crit_exit_gd(mygd);
}

/*
 * Managed acquisition.  This code assumes that the MP lock is held for
 * the tdallq operation and that the thread has been descheduled from its
 * original cpu.  We also have to wait for the thread to be entirely switched
 * out on its original cpu (this is usually fast enough that we never loop)
 * since the LWKT system does not have to hold the MP lock while switching
 * and the target may have released it before switching.
 */
void
lwkt_acquire(thread_t td)
{
    globaldata_t gd;
    globaldata_t mygd;

    gd = td->td_gd;
    mygd = mycpu;
    KKASSERT((td->td_flags & TDF_RUNQ) == 0);
    while (td->td_flags & TDF_RUNNING)  /* XXX spin */
        cpu_mb1();
    if (gd != mygd) {
        crit_enter_gd(mygd);
        TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);        /* protected by BGL */
        td->td_gd = mygd;
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq); /* protected by BGL */
        crit_exit_gd(mygd);
    }
}

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_gd == mycpu) {
            _lwkt_dequeue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_deschedule, td);
        }
    }
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread; LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 *
 * We have to retain the critical section count which uses the high bits
 * of the td_pri field.  The specified priority may also indicate zero or
 * more critical sections by adding TDPRI_CRIT*N.
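 *
 * For example, a thread initialized by lwkt_init_thread() starts with
 * td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT; lwkt_setpri() replaces only the
 * TDPRI_MASK portion and leaves the critical section count untouched.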
 *
 * Note that we requeue the thread whether it winds up on a different runq
 * or not.  uio_yield() depends on this and the routine is not normally
 * called with the same priority otherwise.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT(td->td_gd == mycpu);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

/*
 * Determine if there is a runnable thread at a higher priority than
 * the current thread.  lwkt_setpri() does not check this automatically.
 * Return 1 if there is, 0 if there isn't.
 *
 * Example: if bit 31 of runqmask is set and the current thread is priority
 * 30, then we wind up checking the mask: 0x80000000 against 0x7fffffff.
 *
 * If nq reaches 31 the shift operation will overflow to 0 and we will wind
 * up comparing against 0xffffffff, a comparison that will always be false.
 */
int
lwkt_checkpri_self(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    int nq = td->td_pri & TDPRI_MASK;

    while (gd->gd_runqmask > (__uint32_t)(2 << nq) - 1) {
        if (TAILQ_FIRST(&gd->gd_tdrunq[nq + 1]))
            return(1);
        ++nq;
    }
    return(0);
}

/*
 * Migrate the current thread to the specified cpu.  The BGL must be held
 * (for the gd_tdallq manipulation XXX).  This is accomplished by
 * descheduling ourselves from the current cpu, moving our thread to the
 * tdallq of the target cpu, IPI messaging the target cpu, and switching out.
 * TDF_MIGRATING prevents scheduling races while the thread is being migrated.
 */
#ifdef SMP
static void lwkt_setcpu_remote(void *arg);
#endif

void
lwkt_setcpu_self(globaldata_t rgd)
{
#ifdef SMP
    thread_t td = curthread;

    if (td->td_gd != rgd) {
        crit_enter_quick(td);
        td->td_flags |= TDF_MIGRATING;
        lwkt_deschedule_self(td);
        TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq); /* protected by BGL */
        TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);  /* protected by BGL */
        lwkt_send_ipiq(rgd, (ipifunc_t)lwkt_setcpu_remote, td);
        lwkt_switch();
        /* we are now on the target cpu */
        crit_exit_quick(td);
    }
#endif
}

/*
 * Remote IPI for cpu migration (called while in a critical section so we
 * do not have to enter another one).  The thread has already been moved to
 * our cpu's allq, but we must wait for the thread to be completely switched
 * out on the originating cpu before we schedule it on ours or the stack
 * state may be corrupt.  We clear TDF_MIGRATING after flushing the GD
 * change to main memory.
 *
 * XXX The use of TDF_MIGRATING might not be sufficient to avoid races
 * against wakeups.  It is best if this interface is used only when there
 * are no pending events that might try to schedule the thread.
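 *
 * (this is why the code below stores td_gd, issues cpu_mb2(), and only then
 * clears TDF_MIGRATING and enqueues the thread)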
 */
#ifdef SMP
static void
lwkt_setcpu_remote(void *arg)
{
    thread_t td = arg;
    globaldata_t gd = mycpu;

    while (td->td_flags & TDF_RUNNING)
        cpu_mb1();
    td->td_gd = gd;
    cpu_mb2();
    td->td_flags &= ~TDF_MIGRATING;
    _lwkt_enqueue(td);
}
#endif

struct proc *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;
    while (td->td_preempted)
        td = td->td_preempted;
    return(td->td_proc);
}

/*
 * Block on the specified wait queue until signaled.  A generation number
 * must be supplied to interlock the wait queue.  The function will
 * return immediately if the generation number does not match the wait
 * structure's generation number.
 */
void
lwkt_block(lwkt_wait_t w, const char *wmesg, int *gen)
{
    thread_t td = curthread;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    crit_enter();
    if (w->wa_gen == *gen) {
        _lwkt_dequeue(td);
        TAILQ_INSERT_TAIL(&w->wa_waitq, td, td_threadq);
        ++w->wa_count;
        td->td_wait = w;
        td->td_wmesg = wmesg;
again:
        lwkt_switch();
        if (td->td_wmesg != NULL) {
            _lwkt_dequeue(td);
            goto again;
        }
    }
    crit_exit();
    *gen = w->wa_gen;
    lwkt_reltoken(&ilock);
}

/*
 * Signal a wait queue.  We gain ownership of the wait queue in order to
 * signal it.  Once a thread is removed from the wait queue we have to
 * deal with the cpu owning the thread.
 *
 * Note: alternatively we could message the target cpu owning the wait
 * queue.  YYY implement as sysctl.
 */
void
lwkt_signal(lwkt_wait_t w, int count)
{
    thread_t td;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    ++w->wa_gen;
    crit_enter();
    if (count < 0)
        count = w->wa_count;
    while ((td = TAILQ_FIRST(&w->wa_waitq)) != NULL && count) {
        --count;
        --w->wa_count;
        TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
        td->td_wait = NULL;
        td->td_wmesg = NULL;
        if (td->td_gd == mycpu) {
            _lwkt_enqueue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
        }
    }
    crit_exit();
    lwkt_reltoken(&ilock);
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * NOTE!  By default new threads are created with the MP lock held.  A
 * thread which does not require the MP lock should release it by calling
 * rel_mplock() at the start of the new thread.
 */
int
lwkt_create(void (*func)(void *), void *arg,
    struct thread **tdp, thread_t template, int tdflags, int cpu,
    const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, lwkt_exit, func, arg);
    td->td_flags |= TDF_VERBOSE | tdflags;
#ifdef SMP
    td->td_mpcount = 1;
#endif

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    if ((td->td_flags & TDF_STOPREQ) == 0)
        lwkt_schedule(td);
    else
        td->td_flags &= ~TDF_STOPREQ;
    return 0;
}

/*
 * kthread_* is specific to the kernel and is not needed by userland.
 */
#ifdef _KERNEL

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits; cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;
    globaldata_t gd;

    if (td->td_flags & TDF_VERBOSE)
        printf("kthread %p %s has exited\n", td, td->td_comm);
    caps_exit(td);
    crit_enter_quick(td);
    lwkt_deschedule_self(td);
    gd = mycpu;
    KKASSERT(gd == td->td_gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    if (td->td_flags & TDF_ALLOCATED_THREAD) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_TAIL(&gd->gd_tdfreeq, td, td_threadq);
    }
    cpu_thread_exit();
}

#endif /* _KERNEL */

void
crit_panic(void)
{
    thread_t td = curthread;
    int lpri = td->td_pri;

    td->td_pri = 0;
    panic("td_pri is/would-go negative! %p %d", td, lpri);
}