/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.56 2004/03/01 06:33:17 dillon Exp $
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>

#define THREAD_STACK    (UPAGES * PAGE_SIZE)

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/cpufunc.h>
#include <machine/lock.h>

#endif

static int untimely_switch = 0;
#ifdef INVARIANTS
static int panic_on_cscount = 0;
#endif
static __int64_t switch_count = 0;
static __int64_t preempt_hit = 0;
static __int64_t preempt_miss = 0;
static __int64_t preempt_weird = 0;

#ifdef _KERNEL

SYSCTL_INT(_lwkt, OID_AUTO, untimely_switch, CTLFLAG_RW, &untimely_switch, 0, "");
#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0, "");
#endif
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0, "");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0, "");

#endif

/*
 * These helper procedures handle the runq, they can only be called from
 * within a critical section.
 *
 * WARNING! Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP is live, enqueueing and dequeueing only occur on the current cpu.
 */
static __inline
void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq[nq], td, td_threadq);
        /* runqmask is passively cleaned up by the switcher */
    }
}

static __inline
void
_lwkt_enqueue(thread_t td)
{
    if ((td->td_flags & TDF_RUNQ) == 0) {
        int nq = td->td_pri & TDPRI_MASK;
        struct globaldata *gd = td->td_gd;

        td->td_flags |= TDF_RUNQ;
        TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], td, td_threadq);
        gd->gd_runqmask |= 1 << nq;
    }
}

static __inline
int
_lwkt_wantresched(thread_t ntd, thread_t cur)
{
    return((ntd->td_pri & TDPRI_MASK) > (cur->td_pri & TDPRI_MASK));
}
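
/*
 * Illustration only (not compiled): how the run queues and gd_runqmask
 * fit together.  Each priority level 0..TDPRI_MASK has its own queue in
 * gd_tdrunq[] and a corresponding bit in gd_runqmask.  If, for example,
 * only queues 6 and 27 are non-empty then:
 *
 *      gd->gd_runqmask == (1 << 6) | (1 << 27);
 *      nq  = bsrl(gd->gd_runqmask);            (yields 27)
 *      ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq]);  (highest priority thread)
 *
 * lwkt_switch() below scans the mask this way; a stale bit left behind
 * by _lwkt_dequeue() is simply cleared when the scan finds that queue
 * empty.
 */
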
#ifdef _KERNEL

/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    int i;

    for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
        TAILQ_INIT(&gd->gd_tdrunq[i]);
    gd->gd_runqmask = 0;
    TAILQ_INIT(&gd->gd_tdallq);
}

#endif /* _KERNEL */

/*
 * Initialize a thread wait structure prior to first use.
 *
 * NOTE!  called from low level boot code, we cannot do anything fancy!
 */
void
lwkt_wait_init(lwkt_wait_t w)
{
    lwkt_token_init(&w->wa_token);
    TAILQ_INIT(&w->wa_waitq);
    w->wa_gen = 0;
    w->wa_count = 0;
}

/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 the thread will be created on the current cpu.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int cpu)
{
    void *stack;
    int flags = 0;

    if (td == NULL) {
        crit_enter();
        if (mycpu->gd_tdfreecount > 0) {
            --mycpu->gd_tdfreecount;
            td = TAILQ_FIRST(&mycpu->gd_tdfreeq);
            KASSERT(td != NULL && (td->td_flags & TDF_RUNNING) == 0,
                ("lwkt_alloc_thread: unexpected NULL or corrupted td"));
            TAILQ_REMOVE(&mycpu->gd_tdfreeq, td, td_threadq);
            crit_exit();
            stack = td->td_kstack;
            flags = td->td_flags & (TDF_ALLOCATED_STACK|TDF_ALLOCATED_THREAD);
        } else {
            crit_exit();
#ifdef _KERNEL
            td = zalloc(thread_zone);
#else
            td = malloc(sizeof(struct thread));
#endif
            td->td_kstack = NULL;
            flags |= TDF_ALLOCATED_THREAD;
        }
    }
    if ((stack = td->td_kstack) == NULL) {
#ifdef _KERNEL
        stack = (void *)kmem_alloc(kernel_map, THREAD_STACK);
#else
        stack = libcaps_alloc_stack(THREAD_STACK);
#endif
        flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0)
        lwkt_init_thread(td, stack, flags, mycpu);
    else
        lwkt_init_thread(td, stack, flags, globaldata_find(cpu));
    return(td);
}

#ifdef _KERNEL

/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.  This function may send an IPI message when the
 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the originating
 * cpu.
 *
 * NOTE! we have to be careful with regard to creating threads for other cpus
 * if SMP has not yet been activated.
 */
#ifdef SMP

static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

#endif

void
lwkt_init_thread(thread_t td, void *stack, int flags, struct globaldata *gd)
{
    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_flags |= flags;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
    lwkt_initport(&td->td_msgport, td);
    pmap_init_thread(td);
#ifdef SMP
    if (gd == mycpu) {
        crit_enter();
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        crit_exit();
    } else {
        lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
    }
#else
    crit_enter();
    TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
    crit_exit();
#endif
}

#endif /* _KERNEL */

void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    vsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
}

void
lwkt_hold(thread_t td)
{
    ++td->td_refs;
}

void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    --td->td_refs;
}

#ifdef _KERNEL

void
lwkt_wait_free(thread_t td)
{
    while (td->td_refs)
        tsleep(td, 0, "tdreap", hz);
}

#endif

void
lwkt_free_thread(thread_t td)
{
    struct globaldata *gd = mycpu;

    KASSERT((td->td_flags & TDF_RUNNING) == 0,
        ("lwkt_free_thread: did not exit! %p", td));

    crit_enter();
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    if (gd->gd_tdfreecount < CACHE_NTHREADS &&
        (td->td_flags & TDF_ALLOCATED_THREAD)
    ) {
        ++gd->gd_tdfreecount;
        TAILQ_INSERT_HEAD(&gd->gd_tdfreeq, td, td_threadq);
        crit_exit();
    } else {
        crit_exit();
        if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) {
#ifdef _KERNEL
            kmem_free(kernel_map, (vm_offset_t)td->td_kstack, THREAD_STACK);
#else
            libcaps_free_stack(td->td_kstack, THREAD_STACK);
#endif
            /* gd invalid */
            td->td_kstack = NULL;
        }
        if (td->td_flags & TDF_ALLOCATED_THREAD) {
#ifdef _KERNEL
            zfree(thread_zone, td);
#else
            free(td);
#endif
        }
    }
}


/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * The MP lock may be out of sync with the thread's td_mpcount.  lwkt_switch()
 * cleans it up.  Note that the td_switch() function cannot do anything that
 * requires the MP lock since the MP lock will have already been setup for
 * the target thread (not the current thread).  It's nice to have a scheduler
 * that does not need the MP lock to work because it allows us to do some
 * really cool high-performance MP lock optimizations.
 */

void
lwkt_switch(void)
{
    globaldata_t gd;
    thread_t td = curthread;
    thread_t ntd;
#ifdef SMP
    int mpheld;
#endif

    /*
     * Switching from within a 'fast' (non thread switched) interrupt is
     * illegal.
     */
    if (mycpu->gd_intr_nesting_level && panicstr == NULL) {
        panic("lwkt_switch: cannot switch from within a fast interrupt, yet\n");
    }

    /*
     * Passive release (used to transition from user to kernel mode
     * when we block or switch rather than when we enter the kernel).
     * This function is NOT called if we are switching into a preemption
     * or returning from a preemption.  Typically this causes us to lose
     * our P_CURPROC designation (if we have one) and become a true LWKT
     * thread, and may also hand P_CURPROC to another process and schedule
     * its thread.
     */
    if (td->td_release)
        td->td_release(td);

    crit_enter();
    ++switch_count;

#ifdef SMP
    /*
     * td_mpcount cannot be used to determine if we currently hold the
     * MP lock because get_mplock() will increment it prior to attempting
     * to get the lock, and switch out if it can't.  Our ownership of
     * the actual lock will remain stable while we are in a critical section
     * (but, of course, another cpu may own or release the lock so the
     * actual value of mp_lock is not stable).
     */
    mpheld = MP_LOCK_HELD();
#ifdef INVARIANTS
    if (td->td_cscount) {
        printf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
            td);
        if (panic_on_cscount)
            panic("switching while mastering cpusync");
    }
#endif
#endif
    if ((ntd = td->td_preempted) != NULL) {
        /*
         * We had preempted another thread on this cpu, resume the preempted
         * thread.  This occurs transparently, whether the preempted thread
         * was scheduled or not (it may have been preempted after descheduling
         * itself).
         *
         * We have to set up the MP lock for the original thread after backing
         * out the adjustment that was made to curthread when the original
         * was preempted.
         */
        KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
#ifdef SMP
        if (ntd->td_mpcount && mpheld == 0) {
            panic("MPLOCK NOT HELD ON RETURN: %p %p %d %d\n",
                td, ntd, td->td_mpcount, ntd->td_mpcount);
        }
        if (ntd->td_mpcount) {
            td->td_mpcount -= ntd->td_mpcount;
            KKASSERT(td->td_mpcount >= 0);
        }
#endif
        ntd->td_flags |= TDF_PREEMPT_DONE;
        /* YYY release mp lock on switchback if original doesn't need it */
    } else {
        /*
         * Priority queue / round-robin at each priority.  Note that user
         * processes run at a fixed, low priority and the user process
         * scheduler deals with interactions between user processes
         * by scheduling and descheduling them from the LWKT queue as
         * necessary.
         *
         * We have to adjust the MP lock for the target thread.  If we
         * need the MP lock and cannot obtain it we try to locate a
         * thread that does not need the MP lock.  If we cannot, we spin
         * instead of HLT.
         *
         * A similar issue exists for the tokens held by the target thread.
         * If we cannot obtain ownership of the tokens we cannot immediately
         * schedule the thread.
         */

        /*
         * We are switching threads.  If there are any pending requests for
         * tokens we can satisfy all of them here.
         */
        gd = mycpu;
#ifdef SMP
        if (gd->gd_tokreqbase)
            lwkt_drain_token_requests();
#endif

again:
        if (gd->gd_runqmask) {
            int nq = bsrl(gd->gd_runqmask);
            if ((ntd = TAILQ_FIRST(&gd->gd_tdrunq[nq])) == NULL) {
                gd->gd_runqmask &= ~(1 << nq);
                goto again;
            }
#ifdef SMP
            /*
             * If the target needs the MP lock and we couldn't get it,
             * or if the target is holding tokens and we could not
             * gain ownership of the tokens, continue looking for a
             * thread to schedule and spin instead of HLT if we can't.
             */
            if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) ||
                (ntd->td_toks && lwkt_chktokens(ntd) == 0)
            ) {
                u_int32_t rqmask = gd->gd_runqmask;
                while (rqmask) {
                    TAILQ_FOREACH(ntd, &gd->gd_tdrunq[nq], td_threadq) {
                        if (ntd->td_mpcount && !mpheld && !cpu_try_mplock())
                            continue;
                        mpheld = MP_LOCK_HELD();
                        if (ntd->td_toks && !lwkt_chktokens(ntd))
                            continue;
                        break;
                    }
                    if (ntd)
                        break;
                    rqmask &= ~(1 << nq);
                    nq = bsrl(rqmask);
                }
                if (ntd == NULL) {
                    ntd = &gd->gd_idlethread;
                    ntd->td_flags |= TDF_IDLE_NOHLT;
                } else {
                    TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                    TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
                }
            } else {
                TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
            }
#else
            TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
#endif
        } else {
            /*
             * We have nothing to run but only let the idle loop halt
             * the cpu if there are no pending interrupts.
             */
            ntd = &gd->gd_idlethread;
            if (gd->gd_reqflags & RQF_IDLECHECK_MASK)
                ntd->td_flags |= TDF_IDLE_NOHLT;
        }
    }
    KASSERT(ntd->td_pri >= TDPRI_CRIT,
        ("priority problem in lwkt_switch %d %d", td->td_pri, ntd->td_pri));

    /*
     * Do the actual switch.  If the new target does not need the MP lock
     * and we are holding it, release the MP lock.  If the new target requires
     * the MP lock we have already acquired it for the target.
     */
#ifdef SMP
    if (ntd->td_mpcount == 0) {
        if (MP_LOCK_HELD())
            cpu_rel_mplock();
    } else {
        ASSERT_MP_LOCK_HELD();
    }
#endif
    if (td != ntd) {
        td->td_switch(ntd);
    }

    crit_exit();
}

/*
 * Switch if another thread has a higher priority.  Do not switch to other
 * threads at the same priority.
 */
void
lwkt_maybe_switch()
{
    struct globaldata *gd = mycpu;
    struct thread *td = gd->gd_curthread;

    if ((td->td_pri & TDPRI_MASK) < bsrl(gd->gd_runqmask)) {
        lwkt_switch();
    }
}

/*
 * Request that the target thread preempt the current thread.  Preemption
 * only works under a specific set of conditions:
 *
 *      - We are not preempting ourselves
 *      - The target thread is owned by the current cpu
 *      - We are not currently being preempted
 *      - The target is not currently being preempted
 *      - We are able to satisfy the target's MP lock requirements (if any).
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critpri is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * XXX at the moment we run the target thread in a critical section during
 * the preemption in order to prevent the target from taking interrupts
 * that *WE* can't.  Preemption is strictly limited to interrupt threads
 * and interrupt-like threads, outside of a critical section, and the
 * preempted source thread will be resumed the instant the target blocks
 * whether or not the source is scheduled (i.e. preemption is supposed to
 * be as transparent as possible).
 *
 * The target thread inherits our MP count (added to its own) for the
 * duration of the preemption in order to preserve the atomicity of the
 * MP lock during the preemption.  Therefore, any preempting targets must be
 * careful with regard to MP assertions.  Note that the MP count may be
 * out of sync with the physical mp_lock, but we do not have to preserve
 * the original ownership of the lock if it was out of sync (that is, we
 * can leave it synchronized on return).
 */
void
lwkt_preempt(thread_t ntd, int critpri)
{
    struct globaldata *gd = mycpu;
    thread_t td = gd->gd_curthread;
#ifdef SMP
    int mpheld;
    int savecnt;
#endif

    /*
     * The caller has put us in a critical section.  We can only preempt
     * if the caller of the caller was not in a critical section (basically
     * a local interrupt), as determined by the 'critpri' parameter.  If
     * we are unable to preempt we simply return; the target has already
     * been enqueued and will run normally on the next switch.
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     *
     * Any tokens held by the target may not be held by thread(s) being
     * preempted.  We take the easy way out and do not preempt if
     * the target is holding tokens.
     */
    KASSERT(ntd->td_pri >= TDPRI_CRIT, ("BADCRIT0 %d", ntd->td_pri));

    need_resched();
    if (!_lwkt_wantresched(ntd, td)) {
        ++preempt_miss;
        return;
    }
    if ((td->td_pri & ~TDPRI_MASK) > critpri) {
        ++preempt_miss;
        return;
    }
#ifdef SMP
    if (ntd->td_gd != gd) {
        ++preempt_miss;
        return;
    }
#endif
    /*
     * Take the easy way out and do not preempt if the target is holding
     * one or more tokens.  We could test whether the thread(s) being
     * preempted interlock against the target thread's tokens and whether
     * we can get all the target thread's tokens, but this situation
     * should not occur very often so it's easier to simply not preempt.
     */
    if (ntd->td_toks != NULL) {
        ++preempt_miss;
        return;
    }
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
        ++preempt_weird;
        return;
    }
    if (ntd->td_preempted) {
        ++preempt_hit;
        return;
    }
#ifdef SMP
    /*
     * note: an interrupt might have occurred just as we were transitioning
     * to or from the MP lock.  In this case td_mpcount will be pre-disposed
     * (non-zero) but not actually synchronized with the actual state of the
     * lock.  We can use it to imply an MP lock requirement for the
     * preemption but we cannot use it to test whether we hold the MP lock
     * or not.
     */
    savecnt = td->td_mpcount;
    mpheld = MP_LOCK_HELD();
    ntd->td_mpcount += td->td_mpcount;
    if (mpheld == 0 && ntd->td_mpcount && !cpu_try_mplock()) {
        ntd->td_mpcount -= td->td_mpcount;
        ++preempt_miss;
        return;
    }
#endif

    ++preempt_hit;
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    td->td_switch(ntd);
    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
#ifdef SMP
    KKASSERT(savecnt == td->td_mpcount);
    mpheld = MP_LOCK_HELD();
    if (mpheld && td->td_mpcount == 0)
        cpu_rel_mplock();
    else if (mpheld == 0 && td->td_mpcount)
        panic("lwkt_preempt(): MP lock was not held through");
#endif
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}
This is 714 * typically called when we leave a critical section but it can be safely 715 * called while we are in a critical section. 716 * 717 * This function will not generally yield to equal priority threads but it 718 * can occur as a side effect. Note that lwkt_switch() is called from 719 * inside the critical section to prevent its own crit_exit() from reentering 720 * lwkt_yield_quick(). 721 * 722 * gd_reqflags indicates that *something* changed, e.g. an interrupt or softint 723 * came along but was blocked and made pending. 724 * 725 * (self contained on a per cpu basis) 726 */ 727 void 728 lwkt_yield_quick(void) 729 { 730 globaldata_t gd = mycpu; 731 thread_t td = gd->gd_curthread; 732 733 /* 734 * gd_reqflags is cleared in splz if the cpl is 0. If we were to clear 735 * it with a non-zero cpl then we might not wind up calling splz after 736 * a task switch when the critical section is exited even though the 737 * new task could accept the interrupt. 738 * 739 * XXX from crit_exit() only called after last crit section is released. 740 * If called directly will run splz() even if in a critical section. 741 * 742 * td_nest_count prevent deep nesting via splz() or doreti(). Note that 743 * except for this special case, we MUST call splz() here to handle any 744 * pending ints, particularly after we switch, or we might accidently 745 * halt the cpu with interrupts pending. 746 */ 747 if (gd->gd_reqflags && td->td_nest_count < 2) 748 splz(); 749 750 /* 751 * YYY enabling will cause wakeup() to task-switch, which really 752 * confused the old 4.x code. This is a good way to simulate 753 * preemption and MP without actually doing preemption or MP, because a 754 * lot of code assumes that wakeup() does not block. 755 */ 756 if (untimely_switch && td->td_nest_count == 0 && 757 gd->gd_intr_nesting_level == 0 758 ) { 759 crit_enter(); 760 /* 761 * YYY temporary hacks until we disassociate the userland scheduler 762 * from the LWKT scheduler. 763 */ 764 if (td->td_flags & TDF_RUNQ) { 765 lwkt_switch(); /* will not reenter yield function */ 766 } else { 767 lwkt_schedule_self(); /* make sure we are scheduled */ 768 lwkt_switch(); /* will not reenter yield function */ 769 lwkt_deschedule_self(); /* make sure we are descheduled */ 770 } 771 crit_exit_noyield(td); 772 } 773 } 774 775 /* 776 * This implements a normal yield which, unlike _quick, will yield to equal 777 * priority threads as well. Note that gd_reqflags tests will be handled by 778 * the crit_exit() call in lwkt_switch(). 779 * 780 * (self contained on a per cpu basis) 781 */ 782 void 783 lwkt_yield(void) 784 { 785 lwkt_schedule_self(); 786 lwkt_switch(); 787 } 788 789 /* 790 * Schedule a thread to run. As the current thread we can always safely 791 * schedule ourselves, and a shortcut procedure is provided for that 792 * function. 793 * 794 * (non-blocking, self contained on a per cpu basis) 795 */ 796 void 797 lwkt_schedule_self(void) 798 { 799 thread_t td = curthread; 800 801 crit_enter_quick(td); 802 KASSERT(td->td_wait == NULL, ("lwkt_schedule_self(): td_wait not NULL!")); 803 KASSERT(td != &td->td_gd->gd_idlethread, ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!")); 804 _lwkt_enqueue(td); 805 #ifdef _KERNEL 806 if (td->td_proc && td->td_proc->p_stat == SSLEEP) 807 panic("SCHED SELF PANIC"); 808 #endif 809 crit_exit_quick(td); 810 } 811 812 /* 813 * Generic schedule. Possibly schedule threads belonging to other cpus and 814 * deal with threads that might be blocked on a wait queue. 

/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * YYY this is one of the best places to implement load balancing code.
 * Load balancing can be accomplished by requesting other sorts of actions
 * for the thread in question.
 */
void
lwkt_schedule(thread_t td)
{
#ifdef INVARIANTS
    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
    if ((td->td_flags & TDF_PREEMPT_LOCK) == 0 && td->td_proc
        && td->td_proc->p_stat == SSLEEP
    ) {
        printf("PANIC schedule curtd = %p (%d %d) target %p (%d %d)\n",
            curthread,
            curthread->td_proc ? curthread->td_proc->p_pid : -1,
            curthread->td_proc ? curthread->td_proc->p_stat : -1,
            td,
            td->td_proc ? td->td_proc->p_pid : -1,
            td->td_proc ? td->td_proc->p_stat : -1
        );
        panic("SCHED PANIC");
    }
#endif
    crit_enter();
    if (td == curthread) {
        _lwkt_enqueue(td);
    } else {
        lwkt_wait_t w;

        /*
         * If the thread is on a wait list we have to send our scheduling
         * request to the owner of the wait structure.  Otherwise we send
         * the scheduling request to the cpu owning the thread.  Races
         * are ok, the target will forward the message as necessary (the
         * message may chase the thread around before it finally gets
         * acted upon).
         *
         * (remember, wait structures use stable storage)
         */
        if ((w = td->td_wait) != NULL) {
            lwkt_tokref wref;

            if (lwkt_trytoken(&wref, &w->wa_token)) {
                TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
                --w->wa_count;
                td->td_wait = NULL;
#ifdef SMP
                if (td->td_gd == mycpu) {
                    _lwkt_enqueue(td);
                    if (td->td_preemptable)
                        td->td_preemptable(td, TDPRI_CRIT*2); /* YYY +token */
                    else if (_lwkt_wantresched(td, curthread))
                        need_resched();
                } else {
                    lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
                }
#else
                _lwkt_enqueue(td);
                if (td->td_preemptable)
                    td->td_preemptable(td, TDPRI_CRIT*2); /* YYY +token */
                else if (_lwkt_wantresched(td, curthread))
                    need_resched();
#endif
                lwkt_reltoken(&wref);
            } else {
                lwkt_send_ipiq(w->wa_token.t_cpu, (ipifunc_t)lwkt_schedule, td);
            }
        } else {
            /*
             * If the wait structure is NULL and we own the thread, there
             * is no race (since we are in a critical section).  If we
             * do not own the thread there might be a race but the
             * target cpu will deal with it.
             */
#ifdef SMP
            if (td->td_gd == mycpu) {
                _lwkt_enqueue(td);
                if (td->td_preemptable) {
                    td->td_preemptable(td, TDPRI_CRIT);
                } else if (_lwkt_wantresched(td, curthread)) {
                    need_resched();
                }
            } else {
                lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
            }
#else
            _lwkt_enqueue(td);
            if (td->td_preemptable) {
                td->td_preemptable(td, TDPRI_CRIT);
            } else if (_lwkt_wantresched(td, curthread)) {
                need_resched();
            }
#endif
        }
    }
    crit_exit();
}

/*
 * Managed acquisition.  This code assumes that the MP lock is held for
 * the tdallq operation and that the thread has been descheduled from its
 * original cpu.  We also have to wait for the thread to be entirely switched
 * out on its original cpu (this is usually fast enough that we never loop)
 * since the LWKT system does not have to hold the MP lock while switching
 * and the target may have released it before switching.
 */
void
lwkt_acquire(thread_t td)
{
    struct globaldata *gd;

    gd = td->td_gd;
    KKASSERT((td->td_flags & TDF_RUNQ) == 0);
    while (td->td_flags & TDF_RUNNING)  /* XXX spin */
        ;
    if (gd != mycpu) {
        crit_enter();
        TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);      /* protected by BGL */
        gd = mycpu;
        td->td_gd = gd;
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq); /* protected by BGL */
        crit_exit();
    }
}

/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(void)
{
    thread_t td = curthread;

    crit_enter();
    KASSERT(td->td_wait == NULL, ("lwkt_deschedule_self(): td_wait not NULL!"));
    _lwkt_dequeue(td);
    crit_exit();
}

/*
 * Generic deschedule.  Descheduling threads other than your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_gd == mycpu) {
            _lwkt_dequeue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_deschedule, td);
        }
    }
    crit_exit();
}

/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread, LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 *
 * We have to retain the critical section count which uses the high bits
 * of the td_pri field.  The specified priority may also indicate zero or
 * more critical sections by adding TDPRI_CRIT*N.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT(td->td_gd == mycpu);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}

void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = (td->td_pri & ~TDPRI_MASK) + pri;
    }
    crit_exit();
}
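
/*
 * Illustration only (not compiled): td_pri encodes both the LWKT priority
 * and the critical section nesting count.  The low bits (TDPRI_MASK) hold
 * the priority; each nested critical section adds TDPRI_CRIT.  A thread
 * running at TDPRI_KERN_DAEMON inside two critical sections therefore has:
 *
 *      td->td_pri == TDPRI_KERN_DAEMON + 2 * TDPRI_CRIT;
 *      (td->td_pri & TDPRI_MASK)  == TDPRI_KERN_DAEMON;     (the priority)
 *      (td->td_pri & ~TDPRI_MASK) == 2 * TDPRI_CRIT;        (crit nesting)
 *
 * This is why lwkt_setpri() and lwkt_setpri_self() above only replace the
 * TDPRI_MASK bits and preserve the rest.
 */
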
struct proc *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;
    while (td->td_preempted)
        td = td->td_preempted;
    return(td->td_proc);
}

/*
 * Block on the specified wait queue until signaled.  A generation number
 * must be supplied to interlock the wait queue.  The function will
 * return immediately if the generation number does not match the wait
 * structure's generation number.
 */
void
lwkt_block(lwkt_wait_t w, const char *wmesg, int *gen)
{
    thread_t td = curthread;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    crit_enter();
    if (w->wa_gen == *gen) {
        _lwkt_dequeue(td);
        TAILQ_INSERT_TAIL(&w->wa_waitq, td, td_threadq);
        ++w->wa_count;
        td->td_wait = w;
        td->td_wmesg = wmesg;
again:
        lwkt_switch();
        if (td->td_wmesg != NULL) {
            _lwkt_dequeue(td);
            goto again;
        }
    }
    crit_exit();
    *gen = w->wa_gen;
    lwkt_reltoken(&ilock);
}

/*
 * Signal a wait queue.  We gain ownership of the wait queue in order to
 * signal it.  Once a thread is removed from the wait queue we have to
 * deal with the cpu owning the thread.
 *
 * Note: alternatively we could message the target cpu owning the wait
 * queue.  YYY implement as sysctl.
 */
void
lwkt_signal(lwkt_wait_t w, int count)
{
    thread_t td;
    lwkt_tokref ilock;

    lwkt_gettoken(&ilock, &w->wa_token);
    ++w->wa_gen;
    crit_enter();
    if (count < 0)
        count = w->wa_count;
    while ((td = TAILQ_FIRST(&w->wa_waitq)) != NULL && count) {
        --count;
        --w->wa_count;
        TAILQ_REMOVE(&w->wa_waitq, td, td_threadq);
        td->td_wait = NULL;
        td->td_wmesg = NULL;
        if (td->td_gd == mycpu) {
            _lwkt_enqueue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc_t)lwkt_schedule, td);
        }
    }
    crit_exit();
    lwkt_reltoken(&ilock);
}
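
/*
 * Illustration only (not compiled, helper names are hypothetical): typical
 * use of a wait structure and its generation number.  The consumer samples
 * wa_gen before testing its condition so that a lwkt_signal() racing in
 * between does not cause the subsequent lwkt_block() to sleep forever:
 *
 *      int gen = w->wa_gen;
 *      while (resource_not_ready())
 *              lwkt_block(w, "myrsrc", &gen);
 *
 * lwkt_block() updates 'gen' on return, so the loop re-tests the condition
 * after every wakeup.  The producer makes the resource ready and then calls
 * lwkt_signal(w, n) to wake up to n waiters, or lwkt_signal(w, -1) to wake
 * them all.
 */
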
/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * NOTE!  By default new threads are created with the MP lock held.  A
 * thread which does not require the MP lock should release it by calling
 * rel_mplock() at the start of the new thread.
 */
int
lwkt_create(void (*func)(void *), void *arg,
    struct thread **tdp, thread_t template, int tdflags, int cpu,
    const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(template, cpu);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, lwkt_exit, func, arg);
    td->td_flags |= TDF_VERBOSE | tdflags;
#ifdef SMP
    td->td_mpcount = 1;
#endif

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    if ((td->td_flags & TDF_STOPREQ) == 0)
        lwkt_schedule(td);
    else
        td->td_flags &= ~TDF_STOPREQ;
    return 0;
}

/*
 * kthread_* is specific to the kernel and is not needed by userland.
 */
#ifdef _KERNEL

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;

    if (td->td_flags & TDF_VERBOSE)
        printf("kthread %p %s has exited\n", td, td->td_comm);
    caps_exit(td);
    crit_enter();
    lwkt_deschedule_self();
    ++mycpu->gd_tdfreecount;
    TAILQ_INSERT_TAIL(&mycpu->gd_tdfreeq, td, td_threadq);
    cpu_thread_exit();
}

/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.  5.x compatible.
 *
 * NOTE!  By default kthreads are created with the MP lock held.  A
 * thread which does not require the MP lock should release it by calling
 * rel_mplock() at the start of the new thread.
 */
int
kthread_create(void (*func)(void *), void *arg,
    struct thread **tdp, const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(NULL, -1);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, kthread_exit, func, arg);
    td->td_flags |= TDF_VERBOSE;
#ifdef SMP
    td->td_mpcount = 1;
#endif

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    vsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    lwkt_schedule(td);
    return 0;
}

/*
 * Destroy an LWKT thread.  Warning!  This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 *
 * XXX duplicates lwkt_exit()
 */
void
kthread_exit(void)
{
    lwkt_exit();
}

#endif /* _KERNEL */

void
crit_panic(void)
{
    thread_t td = curthread;
    int lpri = td->td_pri;

    td->td_pri = 0;
    panic("td_pri is/would-go negative! %p %d", td, lpri);
}
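
/*
 * Illustration only (not compiled, names are hypothetical): typical use of
 * kthread_create() above.  The new thread starts with the MP lock held
 * (td_mpcount == 1), so a thread that does not need the lock should drop
 * it with rel_mplock() as soon as it starts running:
 *
 *      static struct thread *my_td;
 *
 *      static void
 *      my_kthread_main(void *arg)
 *      {
 *              rel_mplock();
 *              for (;;) {
 *                      ... do work, block via tsleep()/lwkt_block() ...
 *              }
 *      }
 *
 *      kthread_create(my_kthread_main, NULL, &my_td, "mykthread");
 */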