1 /*
2 * Copyright (c) 2003-2011 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 /*
36 * Each cpu in a system has its own self-contained light weight kernel
37 * thread scheduler, which means that generally speaking we only need
38 * to use a critical section to avoid problems. Foreign thread
39 * scheduling is queued via (async) IPIs.
40 */
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/proc.h>
46 #include <sys/rtprio.h>
47 #include <sys/kinfo.h>
48 #include <sys/malloc.h>
49 #include <sys/queue.h>
50 #include <sys/sysctl.h>
51 #include <sys/kthread.h>
52 #include <machine/cpu.h>
53 #include <sys/lock.h>
54 #include <sys/spinlock.h>
55 #include <sys/ktr.h>
56 #include <sys/indefinite.h>
57
58 #include <sys/thread2.h>
59 #include <sys/spinlock2.h>
60 #include <sys/indefinite2.h>
61
62 #include <sys/dsched.h>
63
64 #include <vm/vm.h>
65 #include <vm/vm_param.h>
66 #include <vm/vm_kern.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_page.h>
69 #include <vm/vm_map.h>
70 #include <vm/vm_pager.h>
71 #include <vm/vm_extern.h>
72
73 #include <machine/stdarg.h>
74 #include <machine/smp.h>
75 #include <machine/clock.h>
76
77 #define LOOPMASK
78
79 #if !defined(KTR_CTXSW)
80 #define KTR_CTXSW KTR_ALL
81 #endif
82 KTR_INFO_MASTER(ctxsw);
83 KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p", int cpu, struct thread *td);
84 KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p", int cpu, struct thread *td);
85 KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s", struct thread *td, char *comm);
86 KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = <dead>", struct thread *td);
87
88 static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");
89 MALLOC_DEFINE(M_FPUCTX, "fpuctx", "kernel FPU contexts");
90
91 #ifdef INVARIANTS
92 static int panic_on_cscount = 0;
93 #endif
94 #ifdef DEBUG_LWKT_THREAD
95 static int64_t switch_count = 0;
96 static int64_t preempt_hit = 0;
97 static int64_t preempt_miss = 0;
98 static int64_t preempt_weird = 0;
99 #endif
100 static int lwkt_use_spin_port;
101 __read_mostly static struct objcache *thread_cache;
102 int cpu_mwait_spin = 0;
103
104 static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
105 static void lwkt_setcpu_remote(void *arg);
106
107 /*
108 * We can make all thread ports use the spin backend instead of the thread
109 * backend. This should only be set to debug the spin backend.
110 */
111 TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);
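
/*
 * Example (illustrative, not part of the original source): like any
 * TUNABLE_INT, this knob can be set at boot time from /boot/loader.conf:
 *
 *	lwkt.use_spin_port="1"
 */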
112
113 #ifdef INVARIANTS
114 SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0,
115 "Panic if attempting to switch lwkt's while mastering cpusync");
116 #endif
117 #ifdef DEBUG_LWKT_THREAD
118 SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0,
119 "Number of switched threads");
120 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0,
121 "Successful preemption events");
122 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0,
123 "Failed preemption events");
124 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0,
125 "Number of preempted threads.");
126 #endif
127 extern int lwkt_sched_debug;
128 int lwkt_sched_debug = 0;
129 SYSCTL_INT(_lwkt, OID_AUTO, sched_debug, CTLFLAG_RW,
130 &lwkt_sched_debug, 0, "Scheduler debug");
131 __read_mostly static u_int lwkt_spin_loops = 10;
132 SYSCTL_UINT(_lwkt, OID_AUTO, spin_loops, CTLFLAG_RW,
133 &lwkt_spin_loops, 0, "Scheduler spin loops until sorted decon");
134 __read_mostly static int preempt_enable = 1;
135 SYSCTL_INT(_lwkt, OID_AUTO, preempt_enable, CTLFLAG_RW,
136 &preempt_enable, 0, "Enable preemption");
137 static int lwkt_cache_threads = 0;
138 SYSCTL_INT(_lwkt, OID_AUTO, cache_threads, CTLFLAG_RD,
139 &lwkt_cache_threads, 0, "thread+kstack cache");
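
/*
 * Example (illustrative): the writable knobs declared above can be
 * inspected and changed at run time with sysctl(8), e.g.:
 *
 *	sysctl lwkt.preempt_enable		(read)
 *	sysctl lwkt.spin_loops=20		(write; CTLFLAG_RW only,
 *						 lwkt.cache_threads is
 *						 read-only once booted)
 */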
140
141 /*
142 * These helper procedures handle the runq, they can only be called from
143 * within a critical section.
144 *
145 * WARNING! Prior to SMP being brought up it is possible to enqueue and
146 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
147 * instead of 'mycpu' when referencing the globaldata structure. Once
148 * SMP is live, enqueuing and dequeuing only occur on the current cpu.
149 */
150 static __inline
151 void
152 _lwkt_dequeue(thread_t td)
153 {
154 if (td->td_flags & TDF_RUNQ) {
155 struct globaldata *gd = td->td_gd;
156
157 td->td_flags &= ~TDF_RUNQ;
158 TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
159 --gd->gd_tdrunqcount;
160 if (TAILQ_FIRST(&gd->gd_tdrunq) == NULL)
161 atomic_clear_int(&gd->gd_reqflags, RQF_RUNNING);
162 }
163 }
164
165 /*
166 * Priority enqueue.
167 *
168 * There are a limited number of lwkt threads runnable since user
169 * processes only schedule one at a time per cpu. However, there can
170 * be many user processes in kernel mode exiting from a tsleep() which
171 * become runnable.
172 *
173 * We scan the queue in both directions to help deal with degenerate
174 * situations when hundreds or thousands (or more) threads are runnable.
175 *
176 * NOTE: lwkt_schedulerclock() will force a round-robin based on td_pri and
177 * will ignore user priority. This is to ensure that user threads in
178 * kernel mode get cpu at some point regardless of what the user
179 * scheduler thinks.
180 */
181 static __inline
182 void
183 _lwkt_enqueue(thread_t td)
184 {
185 thread_t xtd; /* forward scan */
186 thread_t rtd; /* reverse scan */
187
188 if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
189 struct globaldata *gd = td->td_gd;
190
191 td->td_flags |= TDF_RUNQ;
192 xtd = TAILQ_FIRST(&gd->gd_tdrunq);
193 if (xtd == NULL) {
194 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
195 atomic_set_int(&gd->gd_reqflags, RQF_RUNNING);
196 } else {
197 /*
198 * NOTE: td_upri - higher numbers are more desirable, same sense
199 * as td_pri (typically reversed from lwp_upri).
200 *
201 * In the equal priority case we want the best selection
202 * at the beginning so the less desirable selections know
203 * that they have to setrunqueue/go-to-another-cpu, even
204 * though it means switching back to the 'best' selection.
205 * This also avoids degenerate situations when many threads
206 * are runnable or waking up at the same time.
207 *
208 * If upri matches exactly place at end/round-robin.
209 */
210 rtd = TAILQ_LAST(&gd->gd_tdrunq, lwkt_queue);
211
212 while (xtd &&
213 (xtd->td_pri > td->td_pri ||
214 (xtd->td_pri == td->td_pri &&
215 xtd->td_upri >= td->td_upri))) {
216 xtd = TAILQ_NEXT(xtd, td_threadq);
217
218 /*
219 * Doing a reverse scan at the same time is an optimization
220 * for the insert-closer-to-tail case that avoids having to
221 * scan the entire list. This situation can occur when
222 * thousands of threads are woken up at the same time.
223 */
224 if (rtd->td_pri > td->td_pri ||
225 (rtd->td_pri == td->td_pri &&
226 rtd->td_upri >= td->td_upri)) {
227 TAILQ_INSERT_AFTER(&gd->gd_tdrunq, rtd, td, td_threadq);
228 goto skip;
229 }
230 rtd = TAILQ_PREV(rtd, lwkt_queue, td_threadq);
231 }
232 if (xtd)
233 TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
234 else
235 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
236 }
237 skip:
238 ++gd->gd_tdrunqcount;
239
240 /*
241 * Request a LWKT reschedule if we are now at the head of the queue.
242 */
243 if (TAILQ_FIRST(&gd->gd_tdrunq) == td)
244 need_lwkt_resched();
245 }
246 }
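
/*
 * Worked example (illustrative, not from the original source): with a
 * runq holding (td_pri, td_upri) = (30,5) (30,3) (20,0), head first:
 *
 *	- enqueueing a (30,3) thread skips (30,5) and the existing (30,3)
 *	  (an exact pri/upri match sorts to the end of its group for
 *	  round-robin) and inserts before (20,0), giving
 *	  (30,5) (30,3) (30,3)new (20,0).
 *	- enqueueing a (30,9) thread stops at (30,5) because its upri is
 *	  higher, giving (30,9)new (30,5) (30,3) (20,0).
 *
 * The reverse scan only short-circuits the walk for inserts that land
 * near the tail; it does not change the resulting order.
 */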
247
248 static boolean_t
249 _lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
250 {
251 struct thread *td = (struct thread *)obj;
252
253 td->td_kstack = NULL;
254 td->td_kstack_size = 0;
255 td->td_flags = TDF_ALLOCATED_THREAD;
256 td->td_mpflags = 0;
257 return (1);
258 }
259
260 static void
261 _lwkt_thread_dtor(void *obj, void *privdata)
262 {
263 struct thread *td = (struct thread *)obj;
264
265 KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
266 ("_lwkt_thread_dtor: not allocated from objcache"));
267 KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
268 td->td_kstack_size > 0,
269 ("_lwkt_thread_dtor: corrupted stack"));
270 kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
271 td->td_kstack = NULL;
272 td->td_flags = 0;
273 }
274
275 /*
276 * Initialize the lwkt subsystem.
277 *
278 * Nominally cache up to 32 thread + kstack structures. Cache more on
279 * systems with a lot of cpu cores.
280 */
281 static void
282 lwkt_init(void)
283 {
284 TUNABLE_INT("lwkt.cache_threads", &lwkt_cache_threads);
285 if (lwkt_cache_threads == 0) {
286 lwkt_cache_threads = ncpus * 4;
287 if (lwkt_cache_threads < 32)
288 lwkt_cache_threads = 32;
289 }
290 thread_cache = objcache_create_mbacked(
291 M_THREAD, sizeof(struct thread),
292 0, lwkt_cache_threads,
293 _lwkt_thread_ctor, _lwkt_thread_dtor, NULL);
294 }
295 SYSINIT(lwkt_init, SI_BOOT2_LWKT_INIT, SI_ORDER_FIRST, lwkt_init, NULL);
296
297 /*
298 * Schedule a thread to run. As the current thread we can always safely
299 * schedule ourselves, and a shortcut procedure is provided for that
300 * function.
301 *
302 * (non-blocking, self contained on a per cpu basis)
303 */
304 void
305 lwkt_schedule_self(thread_t td)
306 {
307 KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
308 crit_enter_quick(td);
309 KASSERT(td != &td->td_gd->gd_idlethread,
310 ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
311 KKASSERT(td->td_lwp == NULL ||
312 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
313 _lwkt_enqueue(td);
314 crit_exit_quick(td);
315 }
316
317 /*
318 * Deschedule a thread.
319 *
320 * (non-blocking, self contained on a per cpu basis)
321 */
322 void
323 lwkt_deschedule_self(thread_t td)
324 {
325 crit_enter_quick(td);
326 _lwkt_dequeue(td);
327 crit_exit_quick(td);
328 }
329
330 /*
331 * LWKTs operate on a per-cpu basis
332 *
333 * WARNING! Called from early boot, 'mycpu' may not work yet.
334 */
335 void
336 lwkt_gdinit(struct globaldata *gd)
337 {
338 TAILQ_INIT(&gd->gd_tdrunq);
339 TAILQ_INIT(&gd->gd_tdallq);
340 lockinit(&gd->gd_sysctllock, "sysctl", 0, LK_CANRECURSE);
341 }
342
343 /*
344 * Create a new thread. The thread must be associated with a process context
345 * or LWKT start address before it can be scheduled. If the target cpu is
346 * -1 the thread will be created on the current cpu.
347 *
348 * If you intend to create a thread without a process context this function
349 * does everything except load the startup and switcher function.
350 */
351 thread_t
352 lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
353 {
354 static int cpu_rotator;
355 globaldata_t gd = mycpu;
356 void *stack;
357
358 /*
359 * If static thread storage is not supplied allocate a thread. Reuse
360 * a cached free thread if possible. gd_freetd is used to keep an exiting
361 * thread intact through the exit.
362 */
363 if (td == NULL) {
364 crit_enter_gd(gd);
365 if ((td = gd->gd_freetd) != NULL) {
366 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
367 TDF_RUNQ)) == 0);
368 gd->gd_freetd = NULL;
369 } else {
370 td = objcache_get(thread_cache, M_WAITOK);
371 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
372 TDF_RUNQ)) == 0);
373 }
374 crit_exit_gd(gd);
375 KASSERT((td->td_flags &
376 (TDF_ALLOCATED_THREAD|TDF_RUNNING|TDF_PREEMPT_LOCK)) ==
377 TDF_ALLOCATED_THREAD,
378 ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags));
379 flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
380 }
381
382 /*
383 * Try to reuse cached stack.
384 */
385 if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
386 if (flags & TDF_ALLOCATED_STACK) {
387 kmem_free(kernel_map, (vm_offset_t)stack, td->td_kstack_size);
388 stack = NULL;
389 }
390 }
391 if (stack == NULL) {
392 if (cpu < 0) {
393 stack = (void *)kmem_alloc_stack(kernel_map, stksize, 0);
394 } else {
395 stack = (void *)kmem_alloc_stack(kernel_map, stksize,
396 KM_CPU(cpu));
397 }
398 flags |= TDF_ALLOCATED_STACK;
399 }
400 if (cpu < 0) {
401 cpu = ++cpu_rotator;
402 cpu_ccfence();
403 cpu = (uint32_t)cpu % (uint32_t)ncpus;
404 }
405 lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
406 return(td);
407 }
408
409 /*
410 * Initialize a preexisting thread structure. This function is used by
411 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
412 *
413 * All threads start out in a critical section at a priority of
414 * TDPRI_KERN_DAEMON. Higher level code will modify the priority as
415 * appropriate. This function may send an IPI message when the
416 * requested cpu is not the current cpu and consequently gd_tdallq may
417 * not be initialized synchronously from the point of view of the originating
418 * cpu.
419 *
420 * NOTE! We have to be careful with regard to creating threads for other cpus
421 * if SMP has not yet been activated.
422 */
423 static void
424 lwkt_init_thread_remote(void *arg)
425 {
426 thread_t td = arg;
427
428 /*
429 * Protected by critical section held by IPI dispatch
430 */
431 TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
432 }
433
434 /*
435 * lwkt core thread structural initialization.
436 *
437 * NOTE: All threads are initialized as mpsafe threads.
438 */
439 void
440 lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
441 struct globaldata *gd)
442 {
443 globaldata_t mygd = mycpu;
444
445 bzero(td, sizeof(struct thread));
446 td->td_kstack = stack;
447 td->td_kstack_size = stksize;
448 td->td_flags = flags;
449 td->td_mpflags = 0;
450 td->td_type = TD_TYPE_GENERIC;
451 td->td_gd = gd;
452 td->td_pri = TDPRI_KERN_DAEMON;
453 td->td_critcount = 1;
454 td->td_toks_have = NULL;
455 td->td_toks_stop = &td->td_toks_base;
456 if (lwkt_use_spin_port || (flags & TDF_FORCE_SPINPORT)) {
457 lwkt_initport_spin(&td->td_msgport, td,
458 (flags & TDF_FIXEDCPU) ? TRUE : FALSE);
459 } else {
460 lwkt_initport_thread(&td->td_msgport, td);
461 }
462 pmap_init_thread(td);
463
464 /*
465 * Normally initializing a thread for a remote cpu requires sending an
466 * IPI. However, the idlethread is setup before the other cpus are
467 * activated so we have to treat it as a special case. XXX manipulation
468 * of gd_tdallq requires the BGL.
469 */
470 if (gd == mygd || td == &gd->gd_idlethread) {
471 crit_enter_gd(mygd);
472 TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
473 crit_exit_gd(mygd);
474 } else {
475 lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
476 }
477 dsched_enter_thread(td);
478 }
479
480 void
481 lwkt_set_comm(thread_t td, const char *ctl, ...)
482 {
483 __va_list va;
484
485 __va_start(va, ctl);
486 kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
487 __va_end(va);
488 KTR_LOG(ctxsw_newtd, td, td->td_comm);
489 }
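
/*
 * Example (illustrative): callers typically name their helper threads so
 * they show up usefully in ps(1)/top(1), e.g.
 *
 *	lwkt_set_comm(td, "ithread %d", intr);
 *
 * where "intr" is a hypothetical interrupt number.
 */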
490
491 /*
492 * Prevent the thread from getting destroyed. Note that unlike PHOLD/PRELE
493 * this does not prevent the thread from migrating to another cpu so the
494 * gd_tdallq state is not protected by this.
495 */
496 void
497 lwkt_hold(thread_t td)
498 {
499 atomic_add_int(&td->td_refs, 1);
500 }
501
502 void
503 lwkt_rele(thread_t td)
504 {
505 KKASSERT(td->td_refs > 0);
506 atomic_add_int(&td->td_refs, -1);
507 }
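
/*
 * Usage sketch (illustrative): code that hands a thread pointer to another
 * context typically brackets the access so the thread structure cannot be
 * destroyed underneath it:
 *
 *	lwkt_hold(td);
 *	... hand td off, possibly block ...
 *	lwkt_rele(td);
 *
 * As noted above this does not pin td to its current cpu, so gd_tdallq
 * membership still has to be revalidated by the consumer.
 */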
508
509 void
510 lwkt_free_thread(thread_t td)
511 {
512 KKASSERT(td->td_refs == 0);
513 KKASSERT((td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK |
514 TDF_RUNQ | TDF_TSLEEPQ | TDF_KERNELFP)) == 0);
515
516 if (td->td_kfpuctx) {
517 kfree(td->td_kfpuctx, M_FPUCTX);
518 td->td_kfpuctx = NULL;
519 }
520
521 if (td->td_flags & TDF_ALLOCATED_THREAD) {
522 objcache_put(thread_cache, td);
523 } else if (td->td_flags & TDF_ALLOCATED_STACK) {
524 /* client-allocated struct with internally allocated stack */
525 KASSERT(td->td_kstack && td->td_kstack_size > 0,
526 ("lwkt_free_thread: corrupted stack"));
527 kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
528 td->td_kstack = NULL;
529 td->td_kstack_size = 0;
530 }
531
532 KTR_LOG(ctxsw_deadtd, td);
533 }
534
535
536 /*
537 * Switch to the next runnable lwkt. If no LWKTs are runnable then
538 * switch to the idlethread. Switching must occur within a critical
539 * section to avoid races with the scheduling queue.
540 *
541 * We always have full control over our cpu's run queue. Other cpus
542 * that wish to manipulate our queue must use the cpu_*msg() calls to
543 * talk to our cpu, so a critical section is all that is needed and
544 * the result is very, very fast thread switching.
545 *
546 * The LWKT scheduler uses a fixed priority model and round-robins at
547 * each priority level. User process scheduling is a totally
548 * different beast and LWKT priorities should not be confused with
549 * user process priorities.
550 *
551 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt(). lwkt_switch()
552 * is not called by the current thread in the preemption case, only when
553 * the preempting thread blocks (in order to return to the original thread).
554 *
555 * SPECIAL NOTE ON SWITCH ATOMICITY: Certain operations such as thread
556 * migration and tsleep deschedule the current lwkt thread and call
557 * lwkt_switch(). In particular, the target cpu of the migration fully
558 * expects the thread to become non-runnable and can deadlock against
559 * cpusync operations if we run any IPIs prior to switching the thread out.
560 *
561 * WE MUST BE VERY CAREFUL NOT TO RUN SPLZ DIRECTLY OR INDIRECTLY IF
562 * THE CURRENT THREAD HAS BEEN DESCHEDULED!
563 */
564 void
565 lwkt_switch(void)
566 {
567 globaldata_t gd = mycpu;
568 thread_t td = gd->gd_curthread;
569 thread_t ntd;
570 thread_t xtd;
571 int upri;
572 #ifdef LOOPMASK
573 uint64_t tsc_base = rdtsc();
574 #endif
575
576 KKASSERT(gd->gd_processing_ipiq == 0);
577 KKASSERT(td->td_flags & TDF_RUNNING);
578
579 /*
580 * Switching from within a 'fast' (non thread switched) interrupt or IPI
581 * is illegal. However, we may have to do it anyway if we hit a fatal
582 * kernel trap or we have panicked.
583 *
584 * If this case occurs save and restore the interrupt nesting level.
585 */
586 if (gd->gd_intr_nesting_level) {
587 int savegdnest;
588 int savegdtrap;
589
590 if (gd->gd_trap_nesting_level == 0 && panic_cpu_gd != mycpu) {
591 panic("lwkt_switch: Attempt to switch from a "
592 "fast interrupt, ipi, or hard code section, "
593 "td %p\n",
594 td);
595 } else {
596 savegdnest = gd->gd_intr_nesting_level;
597 savegdtrap = gd->gd_trap_nesting_level;
598 gd->gd_intr_nesting_level = 0;
599 gd->gd_trap_nesting_level = 0;
600 if ((td->td_flags & TDF_PANICWARN) == 0) {
601 td->td_flags |= TDF_PANICWARN;
602 kprintf("Warning: thread switch from interrupt, IPI, "
603 "or hard code section.\n"
604 "thread %p (%s)\n", td, td->td_comm);
605 print_backtrace(-1);
606 }
607 lwkt_switch();
608 gd->gd_intr_nesting_level = savegdnest;
609 gd->gd_trap_nesting_level = savegdtrap;
610 return;
611 }
612 }
613
614 /*
615 * Release our current user process designation if we are blocking
616 * or if a user reschedule was requested.
617 *
618 * NOTE: This function is NOT called if we are switching into or
619 * returning from a preemption.
620 *
621 * NOTE: Releasing our current user process designation may cause
622 * it to be assigned to another thread, which in turn will
623 * cause us to block in the usched acquire code when we attempt
624 * to return to userland.
625 *
626 * NOTE: On SMP systems this can be very nasty when heavy token
627 * contention is present so we want to be careful not to
628 * release the designation gratuitously.
629 */
630 if (td->td_release &&
631 (user_resched_wanted() || (td->td_flags & TDF_RUNQ) == 0)) {
632 td->td_release(td);
633 }
634
635 /*
636 * Release all tokens. Once we do this we must remain in the critical
637 * section and cannot run IPIs or other interrupts until we switch away
638 * because they may implode if they try to get a token using our thread
639 * context.
640 */
641 crit_enter_gd(gd);
642 if (TD_TOKS_HELD(td))
643 lwkt_relalltokens(td);
644
645 /*
646 * We had better not be holding any spin locks, but don't get into an
647 * endless panic loop.
648 */
649 KASSERT(gd->gd_spinlocks == 0 || panicstr != NULL,
650 ("lwkt_switch: still holding %d exclusive spinlocks!",
651 gd->gd_spinlocks));
652
653 #ifdef INVARIANTS
654 if (td->td_cscount) {
655 kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
656 td);
657 if (panic_on_cscount)
658 panic("switching while mastering cpusync");
659 }
660 #endif
661
662 /*
663 * If we had preempted another thread on this cpu, resume the preempted
664 * thread. This occurs transparently, whether the preempted thread
665 * was scheduled or not (it may have been preempted after descheduling
666 * itself).
667 *
668 * We have to setup the MP lock for the original thread after backing
669 * out the adjustment that was made to curthread when the original
670 * was preempted.
671 */
672 if ((ntd = td->td_preempted) != NULL) {
673 KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
674 ntd->td_flags |= TDF_PREEMPT_DONE;
675 ntd->td_contended = 0; /* reset contended */
676
677 /*
678 * The interrupt may have woken a thread up, we need to properly
679 * set the reschedule flag if the originally interrupted thread is
680 * at a lower priority.
681 *
682 * NOTE: The interrupt may not have descheduled ntd.
683 *
684 * NOTE: We do not reschedule if there are no threads on the runq.
685 * (ntd could be the idlethread).
686 */
687 xtd = TAILQ_FIRST(&gd->gd_tdrunq);
688 if (xtd && xtd != ntd)
689 need_lwkt_resched();
690 goto havethread_preempted;
691 }
692
693 /*
694 * Figure out switch target. If we cannot switch to our desired target
695 * look for a thread that we can switch to.
696 *
697 * NOTE! The limited spin loop and related parameters are extremely
698 * important for system performance, particularly for pipes and
699 * concurrent conflicting VM faults.
700 */
701 clear_lwkt_resched();
702 ntd = TAILQ_FIRST(&gd->gd_tdrunq);
703
704 if (ntd) {
705 do {
706 if (TD_TOKS_NOT_HELD(ntd) ||
707 lwkt_getalltokens(ntd, (ntd->td_contended > lwkt_spin_loops)))
708 {
709 goto havethread;
710 }
711 ++ntd->td_contended; /* overflow ok */
712 if (gd->gd_indefinite.type == 0)
713 indefinite_init(&gd->gd_indefinite, NULL, NULL, 0, 't');
714 #ifdef LOOPMASK
715 if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
716 kprintf("lwkt_switch: WARNING, excessive token contention "
717 "cpu %d, %d sec, "
718 "td %p (%s)\n",
719 gd->gd_cpuid,
720 ntd->td_contended,
721 ntd,
722 ntd->td_comm);
723 tsc_base = rdtsc();
724 }
725 #endif
726 } while (ntd->td_contended < (lwkt_spin_loops >> 1));
727 upri = ntd->td_upri;
728
729 /*
730 * Bleh, the thread we wanted to switch to has a contended token.
731 * See if we can switch to another thread.
732 *
733 * We generally don't want to do this because it represents a
734 * priority inversion, but contending tokens on the same cpu can
735 * cause real problems if we don't, now that we have an exclusive
736 * priority mechanism over shared for tokens.
737 *
738 * The solution is to allow threads with pending tokens to compete
739 * for them (a lower priority thread will get less cpu once it
740 * returns from the kernel anyway). If a thread does not have
741 * any contending tokens, we go by td_pri and upri.
742 */
743 while ((ntd = TAILQ_NEXT(ntd, td_threadq)) != NULL) {
744 if (TD_TOKS_NOT_HELD(ntd) &&
745 ntd->td_pri < TDPRI_KERN_LPSCHED && upri > ntd->td_upri) {
746 continue;
747 }
748 if (upri < ntd->td_upri)
749 upri = ntd->td_upri;
750
751 /*
752 * Try this one.
753 */
754 if (TD_TOKS_NOT_HELD(ntd) ||
755 lwkt_getalltokens(ntd, (ntd->td_contended > lwkt_spin_loops))) {
756 goto havethread;
757 }
758 ++ntd->td_contended; /* overflow ok */
759 }
760
761 /*
762 * Fall through, switch to idle thread to get us out of the current
763 * context. Since we were contended, prevent HLT by flagging a
764 * LWKT reschedule.
765 */
766 need_lwkt_resched();
767 }
768
769 /*
770 * We either contended on ntd or the runq is empty. We must switch
771 * through the idle thread to get out of the current context.
772 */
773 ntd = &gd->gd_idlethread;
774 if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
775 ASSERT_NO_TOKENS_HELD(ntd);
776 cpu_time.cp_msg[0] = 0;
777 goto haveidle;
778
779 havethread:
780 /*
781 * Clear gd_idle_repeat when doing a normal switch to a non-idle
782 * thread.
783 */
784 ntd->td_wmesg = NULL;
785 ntd->td_contended = 0; /* reset once scheduled */
786 ++gd->gd_cnt.v_swtch;
787 gd->gd_idle_repeat = 0;
788
789 /*
790 * If we were busy waiting record final disposition
791 */
792 if (gd->gd_indefinite.type)
793 indefinite_done(&gd->gd_indefinite);
794
795 havethread_preempted:
796 /*
797 * If the new target does not need the MP lock and we are holding it,
798 * release the MP lock. If the new target requires the MP lock we have
799 * already acquired it for the target.
800 */
801 ;
802 haveidle:
803 KASSERT(ntd->td_critcount,
804 ("priority problem in lwkt_switch %d %d",
805 td->td_critcount, ntd->td_critcount));
806
807 if (td != ntd) {
808 /*
809 * Execute the actual thread switch operation. This function
810 * returns to the current thread and returns the previous thread
811 * (which may be different from the thread we switched to).
812 *
813 * We are responsible for marking ntd as TDF_RUNNING.
814 */
815 KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
816 #ifdef DEBUG_LWKT_THREAD
817 ++switch_count;
818 #endif
819 KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd);
820 ntd->td_flags |= TDF_RUNNING;
821 lwkt_switch_return(td->td_switch(ntd));
822 /* ntd invalid, td_switch() can return a different thread_t */
823 }
824
825 /*
826 * catch-all. XXX is this strictly needed?
827 */
828 splz_check();
829
830 /* NOTE: current cpu may have changed after switch */
831 crit_exit_quick(td);
832 }
833
834 /*
835 * Called by assembly in the td_switch (thread restore path) for thread
836 * bootstrap cases which do not 'return' to lwkt_switch().
837 */
838 void
839 lwkt_switch_return(thread_t otd)
840 {
841 globaldata_t rgd;
842 #ifdef LOOPMASK
843 uint64_t tsc_base = rdtsc();
844 #endif
845 int exiting;
846
847 exiting = otd->td_flags & TDF_EXITING;
848 cpu_ccfence();
849
850 /*
851 * Check if otd was migrating. Now that we are on ntd we can finish
852 * up the migration. This is a bit messy but it is the only place
853 * where td is known to be fully descheduled.
854 *
855 * We can only activate the migration if otd was migrating but not
856 * held on the cpu due to a preemption chain. We still have to
857 * clear TDF_RUNNING on the old thread either way.
858 *
859 * We are responsible for clearing the previously running thread's
860 * TDF_RUNNING.
861 */
862 if ((rgd = otd->td_migrate_gd) != NULL &&
863 (otd->td_flags & TDF_PREEMPT_LOCK) == 0) {
864 KKASSERT((otd->td_flags & (TDF_MIGRATING | TDF_RUNNING)) ==
865 (TDF_MIGRATING | TDF_RUNNING));
866 otd->td_migrate_gd = NULL;
867 otd->td_flags &= ~TDF_RUNNING;
868 lwkt_send_ipiq(rgd, lwkt_setcpu_remote, otd);
869 } else {
870 otd->td_flags &= ~TDF_RUNNING;
871 }
872
873 /*
874 * Final exit validations (see lwp_wait()). Note that otd becomes
875 * invalid the *instant* we set TDF_MP_EXITSIG.
876 *
877 * Use the EXITING status loaded from before we clear TDF_RUNNING,
878 * because if it is not set otd becomes invalid the instant we clear
879 * TDF_RUNNING on it (otherwise, if the system is fast enough, we
880 * might 'steal' TDF_EXITING from another switch-return!).
881 */
882 while (exiting) {
883 u_int mpflags;
884
885 mpflags = otd->td_mpflags;
886 cpu_ccfence();
887
888 if (mpflags & TDF_MP_EXITWAIT) {
889 if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
890 mpflags | TDF_MP_EXITSIG)) {
891 wakeup(otd);
892 break;
893 }
894 } else {
895 if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
896 mpflags | TDF_MP_EXITSIG)) {
897 wakeup(otd);
898 break;
899 }
900 }
901
902 #ifdef LOOPMASK
903 if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
904 kprintf("lwkt_switch_return: excessive TDF_EXITING "
905 "thread %p\n", otd);
906 tsc_base = rdtsc();
907 }
908 #endif
909 }
910 }
911
912 /*
913 * Request that the target thread preempt the current thread. Preemption
914 * can only occur when:
915 *
916 * - Our critical section is the one that we were called with
917 * - The relative priority of the target thread is higher
918 * - The target is not excessively interrupt-nested via td_nest_count
919 * - The target thread holds no tokens.
920 * - The target thread is not already scheduled and belongs to the
921 * current cpu.
922 * - The current thread is not holding any spin-locks.
923 *
924 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION. Typically
925 * this is called via lwkt_schedule() through the td_preemptable callback.
926 * critcount is the managed critical priority that we should ignore in order
927 * to determine whether preemption is possible (aka usually just the crit
928 * priority of lwkt_schedule() itself).
929 *
930 * Preemption is typically limited to interrupt threads.
931 *
932 * Operation works in a fairly straightforward manner. The normal
933 * scheduling code is bypassed and we switch directly to the target
934 * thread. When the target thread attempts to block or switch away
935 * code at the base of lwkt_switch() will switch directly back to our
936 * thread. Our thread is able to retain whatever tokens it holds and
937 * if the target needs one of them the target will switch back to us
938 * and reschedule itself normally.
939 */
940 void
941 lwkt_preempt(thread_t ntd, int critcount)
942 {
943 struct globaldata *gd = mycpu;
944 thread_t xtd;
945 thread_t td;
946 int save_gd_intr_nesting_level;
947
948 /*
949 * The caller has put us in a critical section. We can only preempt
950 * if the caller of the caller was not in a critical section (basically
951 * a local interrupt), as determined by the 'critcount' parameter. We
952 * also can't preempt if the caller is holding any spinlocks (even if
953 * he isn't in a critical section). This also handles the tokens test.
954 *
955 * YYY The target thread must be in a critical section (else it must
956 * inherit our critical section? I dunno yet).
957 */
958 KASSERT(ntd->td_critcount, ("BADCRIT0 %d", ntd->td_pri));
959
960 td = gd->gd_curthread;
961 if (preempt_enable == 0) {
962 #ifdef DEBUG_LWKT_THREAD
963 ++preempt_miss;
964 #endif
965 return;
966 }
967 if (ntd->td_pri <= td->td_pri) {
968 #ifdef DEBUG_LWKT_THREAD
969 ++preempt_miss;
970 #endif
971 return;
972 }
973 if (td->td_critcount > critcount) {
974 #ifdef DEBUG_LWKT_THREAD
975 ++preempt_miss;
976 #endif
977 return;
978 }
979 if (td->td_nest_count >= 2) {
980 #ifdef DEBUG_LWKT_THREAD
981 ++preempt_miss;
982 #endif
983 return;
984 }
985 if (td->td_cscount) {
986 #ifdef DEBUG_LWKT_THREAD
987 ++preempt_miss;
988 #endif
989 return;
990 }
991 if (ntd->td_gd != gd) {
992 #ifdef DEBUG_LWKT_THREAD
993 ++preempt_miss;
994 #endif
995 return;
996 }
997
998 /*
999 * We don't have to check spinlocks here as they will also bump
1000 * td_critcount.
1001 *
1002 * Do not try to preempt if the target thread is holding any tokens.
1003 * We could try to acquire the tokens but this case is so rare there
1004 * is no need to support it.
1005 */
1006 KKASSERT(gd->gd_spinlocks == 0);
1007
1008 if (TD_TOKS_HELD(ntd)) {
1009 #ifdef DEBUG_LWKT_THREAD
1010 ++preempt_miss;
1011 #endif
1012 return;
1013 }
1014 if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
1015 #ifdef DEBUG_LWKT_THREAD
1016 ++preempt_weird;
1017 #endif
1018 return;
1019 }
1020 if (ntd->td_preempted) {
1021 #ifdef DEBUG_LWKT_THREAD
1022 ++preempt_hit;
1023 #endif
1024 return;
1025 }
1026 KKASSERT(gd->gd_processing_ipiq == 0);
1027
1028 /*
1029 * Since we are able to preempt the current thread, there is no need to
1030 * call need_lwkt_resched().
1031 *
1032 * We must temporarily clear gd_intr_nesting_level around the switch
1033 * since switchouts from the target thread are allowed (they will just
1034 * return to our thread), and since the target thread has its own stack.
1035 *
1036 * A preemption must switch back to the original thread, assert the
1037 * case.
1038 */
1039 #ifdef DEBUG_LWKT_THREAD
1040 ++preempt_hit;
1041 #endif
1042 ntd->td_preempted = td;
1043 td->td_flags |= TDF_PREEMPT_LOCK;
1044 KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd);
1045 save_gd_intr_nesting_level = gd->gd_intr_nesting_level;
1046 gd->gd_intr_nesting_level = 0;
1047
1048 KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
1049 ntd->td_flags |= TDF_RUNNING;
1050 xtd = td->td_switch(ntd);
1051 KKASSERT(xtd == ntd);
1052 lwkt_switch_return(xtd);
1053 gd->gd_intr_nesting_level = save_gd_intr_nesting_level;
1054
1055 KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
1056 ntd->td_preempted = NULL;
1057 td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
1058 }
1059
1060 /*
1061 * Conditionally call splz() if gd_reqflags indicates work is pending.
1062 * This will work inside a critical section but not inside a hard code
1063 * section.
1064 *
1065 * (self contained on a per cpu basis)
1066 */
1067 void
1068 splz_check(void)
1069 {
1070 globaldata_t gd = mycpu;
1071 thread_t td = gd->gd_curthread;
1072
1073 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) &&
1074 gd->gd_intr_nesting_level == 0 &&
1075 td->td_nest_count < 2)
1076 {
1077 splz();
1078 }
1079 }
1080
1081 /*
1082 * This version is integrated into crit_exit, reqflags has already
1083 * been tested but td_critcount has not.
1084 *
1085 * We only want to execute the splz() on the 1->0 transition of
1086 * critcount and not in a hard code section or if too deeply nested.
1087 *
1088 * NOTE: gd->gd_spinlocks is implied to be 0 when td_critcount is 0.
1089 */
1090 void
1091 lwkt_maybe_splz(thread_t td)
1092 {
1093 globaldata_t gd = td->td_gd;
1094
1095 if (td->td_critcount == 0 &&
1096 gd->gd_intr_nesting_level == 0 &&
1097 td->td_nest_count < 2)
1098 {
1099 splz();
1100 }
1101 }
1102
1103 /*
1104 * Drivers which set up processing co-threads can call this function to
1105 * run the co-thread at a higher priority and to allow it to preempt
1106 * normal threads.
1107 */
1108 void
1109 lwkt_set_interrupt_support_thread(void)
1110 {
1111 thread_t td = curthread;
1112
1113 lwkt_setpri_self(TDPRI_INT_SUPPORT);
1114 td->td_flags |= TDF_INTTHREAD;
1115 td->td_preemptable = lwkt_preempt;
1116 }
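
/*
 * Usage sketch (illustrative, hypothetical function name): a driver
 * co-thread typically promotes itself once, from its own context, right
 * after it starts running:
 *
 *	static void
 *	example_cothread(void *arg)
 *	{
 *		lwkt_set_interrupt_support_thread();
 *		for (;;) {
 *			... wait for and process completions ...
 *		}
 *	}
 */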
1117
1118
1119 /*
1120 * This function is used to negotiate a passive release of the current
1121 * process/lwp designation with the user scheduler, allowing the user
1122 * scheduler to schedule another user thread. The related kernel thread
1123 * (curthread) continues running in the released state.
1124 */
1125 void
1126 lwkt_passive_release(struct thread *td)
1127 {
1128 struct lwp *lp = td->td_lwp;
1129
1130 td->td_release = NULL;
1131 lwkt_setpri_self(TDPRI_KERN_USER);
1132
1133 lp->lwp_proc->p_usched->release_curproc(lp);
1134 }
1135
1136
1137 /*
1138 * This implements a LWKT yield, allowing a kernel thread to yield to other
1139 * kernel threads at the same or higher priority. This function can be
1140 * called in a tight loop and will typically only yield once per tick.
1141 *
1142 * Most kernel threads run at the same priority in order to allow equal
1143 * sharing.
1144 *
1145 * (self contained on a per cpu basis)
1146 */
1147 void
1148 lwkt_yield(void)
1149 {
1150 globaldata_t gd = mycpu;
1151 thread_t td = gd->gd_curthread;
1152
1153 /*
1154 * Should never be called with spinlocks held but there is a path
1155 * via ACPI where it might happen.
1156 */
1157 if (gd->gd_spinlocks)
1158 return;
1159
1160 /*
1161 * Safe to call splz if we are not too-heavily nested.
1162 */
1163 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
1164 splz();
1165
1166 /*
1167 * Caller allows switching
1168 */
1169 if (lwkt_resched_wanted()) {
1170 atomic_set_int(&td->td_mpflags, TDF_MP_DIDYIELD);
1171 lwkt_schedule_self(td);
1172 lwkt_switch();
1173 }
1174 }
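
/*
 * Usage sketch (illustrative): a cpu-bound kernel loop can sprinkle in
 * lwkt_yield(); it is cheap when no reschedule is pending and typically
 * gives up the cpu at most about once per tick:
 *
 *	for (i = 0; i < nitems; ++i) {
 *		process_item(i);		(hypothetical helper)
 *		lwkt_yield();
 *	}
 */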
1175
1176 /*
1177 * The quick version processes pending interrupts and higher-priority
1178 * LWKT threads but will not round-robin same-priority LWKT threads.
1179 *
1180 * When called while attempting to return to userland the only same-pri
1181 * threads are the ones which have already tried to become the current
1182 * user process.
1183 */
1184 void
1185 lwkt_yield_quick(void)
1186 {
1187 globaldata_t gd = mycpu;
1188 thread_t td = gd->gd_curthread;
1189
1190 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
1191 splz();
1192 if (lwkt_resched_wanted()) {
1193 crit_enter();
1194 if (TAILQ_FIRST(&gd->gd_tdrunq) == td) {
1195 clear_lwkt_resched();
1196 } else {
1197 atomic_set_int(&td->td_mpflags, TDF_MP_DIDYIELD);
1198 lwkt_schedule_self(curthread);
1199 lwkt_switch();
1200 }
1201 crit_exit();
1202 }
1203 }
1204
1205 /*
1206 * This yield is designed for kernel threads with a user context.
1207 *
1208 * The kernel acting on behalf of the user is potentially cpu-bound;
1209 * this function will efficiently allow other threads to run and also
1210 * switch to other processes by releasing.
1211 *
1212 * The lwkt_user_yield() function is designed to have very low overhead
1213 * if no yield is determined to be needed.
1214 */
1215 void
1216 lwkt_user_yield(void)
1217 {
1218 globaldata_t gd = mycpu;
1219 thread_t td = gd->gd_curthread;
1220
1221 /*
1222 * Should never be called with spinlocks held but there is a path
1223 * via ACPI where it might happen.
1224 */
1225 if (gd->gd_spinlocks)
1226 return;
1227
1228 /*
1229 * Always run any pending interrupts in case we are in a critical
1230 * section.
1231 */
1232 if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
1233 splz();
1234
1235 /*
1236 * Switch (which forces a release) if another kernel thread needs
1237 * the cpu, if userland wants us to resched, or if our kernel
1238 * quantum has run out.
1239 */
1240 if (lwkt_resched_wanted() ||
1241 user_resched_wanted())
1242 {
1243 lwkt_switch();
1244 }
1245
1246 #if 0
1247 /*
1248 * Reacquire the current process if we are released.
1249 *
1250 * XXX not implemented atm. The kernel may be holding locks and such,
1251 * so we want the thread to continue to receive cpu.
1252 */
1253 if (td->td_release == NULL && lp) {
1254 lp->lwp_proc->p_usched->acquire_curproc(lp);
1255 td->td_release = lwkt_passive_release;
1256 lwkt_setpri_self(TDPRI_USER_NORM);
1257 }
1258 #endif
1259 }
1260
1261 /*
1262 * Generic schedule. Possibly schedule threads belonging to other cpus and
1263 * deal with threads that might be blocked on a wait queue.
1264 *
1265 * We have a little helper inline function which does additional work after
1266 * the thread has been enqueued, including dealing with preemption and
1267 * setting need_lwkt_resched() (which prevents the kernel from returning
1268 * to userland until it has processed higher priority threads).
1269 *
1270 * It is possible for this routine to be called after a failed _enqueue
1271 * (due to the target thread migrating, sleeping, or otherwise blocked).
1272 * We have to check that the thread is actually on the run queue!
1273 */
1274 static __inline
1275 void
1276 _lwkt_schedule_post(globaldata_t gd, thread_t ntd, int ccount)
1277 {
1278 if (ntd->td_flags & TDF_RUNQ) {
1279 if (ntd->td_preemptable) {
1280 ntd->td_preemptable(ntd, ccount); /* YYY +token */
1281 }
1282 }
1283 }
1284
1285 static __inline
1286 void
1287 _lwkt_schedule(thread_t td)
1288 {
1289 globaldata_t mygd = mycpu;
1290
1291 KASSERT(td != &td->td_gd->gd_idlethread,
1292 ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
1293 KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
1294 crit_enter_gd(mygd);
1295 KKASSERT(td->td_lwp == NULL ||
1296 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1297
1298 if (td == mygd->gd_curthread) {
1299 _lwkt_enqueue(td);
1300 } else {
1301 /*
1302 * If we own the thread, there is no race (since we are in a
1303 * critical section). If we do not own the thread there might
1304 * be a race but the target cpu will deal with it.
1305 */
1306 if (td->td_gd == mygd) {
1307 _lwkt_enqueue(td);
1308 _lwkt_schedule_post(mygd, td, 1);
1309 } else {
1310 lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);
1311 }
1312 }
1313 crit_exit_gd(mygd);
1314 }
1315
1316 void
1317 lwkt_schedule(thread_t td)
1318 {
1319 _lwkt_schedule(td);
1320 }
1321
1322 void
1323 lwkt_schedule_noresched(thread_t td) /* XXX not impl */
1324 {
1325 _lwkt_schedule(td);
1326 }
1327
1328 /*
1329 * When scheduled remotely, if frame != NULL the IPIQ is being
1330 * run via doreti or an interrupt and preemption can be allowed.
1331 *
1332 * To allow preemption we have to drop the critical section so only
1333 * one is present in _lwkt_schedule_post.
1334 */
1335 static void
1336 lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
1337 {
1338 thread_t td = curthread;
1339 thread_t ntd = arg;
1340
1341 if (frame && ntd->td_preemptable) {
1342 crit_exit_noyield(td);
1343 _lwkt_schedule(ntd);
1344 crit_enter_quick(td);
1345 } else {
1346 _lwkt_schedule(ntd);
1347 }
1348 }
1349
1350 /*
1351 * Thread migration using a 'Pull' method. The thread may or may not be
1352 * the current thread. It MUST be descheduled and in a stable state.
1353 * lwkt_giveaway() must be called on the cpu owning the thread.
1354 *
1355 * At any point after lwkt_giveaway() is called, the target cpu may
1356 * 'pull' the thread by calling lwkt_acquire().
1357 *
1358 * We have to make sure the thread is not sitting on a per-cpu tsleep
1359 * queue or it will blow up when it moves to another cpu.
1360 *
1361 * MPSAFE - must be called under very specific conditions.
1362 */
1363 void
1364 lwkt_giveaway(thread_t td)
1365 {
1366 globaldata_t gd = mycpu;
1367
1368 crit_enter_gd(gd);
1369 if (td->td_flags & TDF_TSLEEPQ)
1370 tsleep_remove(td);
1371 KKASSERT(td->td_gd == gd);
1372 TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
1373 td->td_flags |= TDF_MIGRATING;
1374 crit_exit_gd(gd);
1375 }
1376
1377 void
1378 lwkt_acquire(thread_t td)
1379 {
1380 globaldata_t gd;
1381 globaldata_t mygd;
1382
1383 KKASSERT(td->td_flags & TDF_MIGRATING);
1384 gd = td->td_gd;
1385 mygd = mycpu;
1386 if (gd != mycpu) {
1387 #ifdef LOOPMASK
1388 uint64_t tsc_base = rdtsc();
1389 #endif
1390 cpu_lfence();
1391 KKASSERT((td->td_flags & TDF_RUNQ) == 0);
1392 crit_enter_gd(mygd);
1393 DEBUG_PUSH_INFO("lwkt_acquire");
1394 while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
1395 lwkt_process_ipiq();
1396 cpu_lfence();
1397 #ifdef _KERNEL_VIRTUAL
1398 vkernel_yield();
1399 #endif
1400 #ifdef LOOPMASK
1401 if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
1402 kprintf("lwkt_acquire: stuck td %p td->td_flags %08x\n",
1403 td, td->td_flags);
1404 tsc_base = rdtsc();
1405 }
1406 #endif
1407 }
1408 DEBUG_POP_INFO();
1409 cpu_mfence();
1410 td->td_gd = mygd;
1411 TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
1412 td->td_flags &= ~TDF_MIGRATING;
1413 crit_exit_gd(mygd);
1414 } else {
1415 crit_enter_gd(mygd);
1416 TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
1417 td->td_flags &= ~TDF_MIGRATING;
1418 crit_exit_gd(mygd);
1419 }
1420 }
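
/*
 * Usage sketch (illustrative; the call sites are hypothetical): pull
 * migration pairs the two functions across cpus.  On the cpu that
 * currently owns a descheduled, stable thread:
 *
 *	lwkt_giveaway(td);	(drops td from this cpu's tdallq and
 *				 marks it TDF_MIGRATING)
 *
 * and later, on the target cpu:
 *
 *	lwkt_acquire(td);	(waits for td to be fully switched out,
 *				 takes ownership, clears TDF_MIGRATING)
 *	lwkt_schedule(td);
 */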
1421
1422 /*
1423 * Generic deschedule. Descheduling threads other than your own should be
1424 * done only in carefully controlled circumstances. Descheduling is
1425 * asynchronous.
1426 *
1427 * This function may block if the cpu has run out of messages.
1428 */
1429 void
1430 lwkt_deschedule(thread_t td)
1431 {
1432 crit_enter();
1433 if (td == curthread) {
1434 _lwkt_dequeue(td);
1435 } else {
1436 if (td->td_gd == mycpu) {
1437 _lwkt_dequeue(td);
1438 } else {
1439 lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
1440 }
1441 }
1442 crit_exit();
1443 }
1444
1445 /*
1446 * Set the target thread's priority. This routine does not automatically
1447 * switch to a higher priority thread, LWKT threads are not designed for
1448 * continuous priority changes. Yield if you want to switch.
1449 */
1450 void
1451 lwkt_setpri(thread_t td, int pri)
1452 {
1453 if (td->td_pri != pri) {
1454 KKASSERT(pri >= 0);
1455 crit_enter();
1456 if (td->td_flags & TDF_RUNQ) {
1457 KKASSERT(td->td_gd == mycpu);
1458 _lwkt_dequeue(td);
1459 td->td_pri = pri;
1460 _lwkt_enqueue(td);
1461 } else {
1462 td->td_pri = pri;
1463 }
1464 crit_exit();
1465 }
1466 }
1467
1468 /*
1469 * Set the initial priority for a thread prior to it being scheduled for
1470 * the first time. The thread MUST NOT be scheduled before or during
1471 * this call. The thread may be assigned to a cpu other than the current
1472 * cpu.
1473 *
1474 * Typically used after a thread has been created with TDF_STOPPREQ,
1475 * and before the thread is initially scheduled.
1476 */
1477 void
1478 lwkt_setpri_initial(thread_t td, int pri)
1479 {
1480 KKASSERT(pri >= 0);
1481 KKASSERT((td->td_flags & TDF_RUNQ) == 0);
1482 td->td_pri = pri;
1483 }
1484
1485 void
1486 lwkt_setpri_self(int pri)
1487 {
1488 thread_t td = curthread;
1489
1490 KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
1491 crit_enter();
1492 if (td->td_flags & TDF_RUNQ) {
1493 _lwkt_dequeue(td);
1494 td->td_pri = pri;
1495 _lwkt_enqueue(td);
1496 } else {
1497 td->td_pri = pri;
1498 }
1499 crit_exit();
1500 }
1501
1502 /*
1503 * hz tick scheduler clock for LWKT threads
1504 */
1505 void
1506 lwkt_schedulerclock(thread_t td)
1507 {
1508 globaldata_t gd = td->td_gd;
1509 thread_t xtd;
1510
1511 xtd = TAILQ_FIRST(&gd->gd_tdrunq);
1512 if (xtd == td) {
1513 /*
1514 * If the current thread is at the head of the runq shift it to the
1515 * end of any equal-priority threads and request a LWKT reschedule
1516 * if it moved.
1517 *
1518 * Ignore upri in this situation. There will only be one user thread
1519 * in user mode, all others will be user threads running in kernel
1520 * mode and we have to make sure they get some cpu.
1521 */
1522 xtd = TAILQ_NEXT(td, td_threadq);
1523 if (xtd && xtd->td_pri == td->td_pri) {
1524 TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
1525 while (xtd && xtd->td_pri == td->td_pri)
1526 xtd = TAILQ_NEXT(xtd, td_threadq);
1527 if (xtd)
1528 TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
1529 else
1530 TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
1531 need_lwkt_resched();
1532 }
1533 } else if (xtd) {
1534 /*
1535 * If we scheduled a thread other than the one at the head of the
1536 * queue, always request a reschedule every tick.
1537 */
1538 need_lwkt_resched();
1539 }
1540 /* else curthread probably the idle thread, no need to reschedule */
1541 }
1542
1543 /*
1544 * Migrate the current thread to the specified cpu.
1545 *
1546 * This is accomplished by descheduling ourselves from the current cpu
1547 * and setting td_migrate_gd. The lwkt_switch() code will detect that the
1548 * 'old' thread wants to migrate after it has been completely switched out
1549 * and will complete the migration.
1550 *
1551 * TDF_MIGRATING prevents scheduling races while the thread is being migrated.
1552 *
1553 * We must be sure to release our current process designation (if a user
1554 * process) before clearing out any tsleepq we are on because the release
1555 * code may re-add us.
1556 *
1557 * We must be sure to remove ourselves from the current cpu's tsleepq
1558 * before potentially moving to another queue. The thread can be on
1559 * a tsleepq due to a left-over tsleep_interlock().
1560 */
1561
1562 void
1563 lwkt_setcpu_self(globaldata_t rgd)
1564 {
1565 thread_t td = curthread;
1566
1567 if (td->td_gd != rgd) {
1568 crit_enter_quick(td);
1569
1570 if (td->td_release)
1571 td->td_release(td);
1572 if (td->td_flags & TDF_TSLEEPQ)
1573 tsleep_remove(td);
1574
1575 /*
1576 * Set TDF_MIGRATING to prevent a spurious reschedule while we are
1577 * trying to deschedule ourselves and switch away, then deschedule
1578 * ourself, remove us from tdallq, and set td_migrate_gd. Finally,
1579 * call lwkt_switch() to complete the operation.
1580 */
1581 td->td_flags |= TDF_MIGRATING;
1582 lwkt_deschedule_self(td);
1583 TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
1584 td->td_migrate_gd = rgd;
1585 lwkt_switch();
1586
1587 /*
1588 * We are now on the target cpu
1589 */
1590 KKASSERT(rgd == mycpu);
1591 TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
1592 crit_exit_quick(td);
1593 }
1594 }
1595
1596 void
1597 lwkt_migratecpu(int cpuid)
1598 {
1599 globaldata_t rgd;
1600
1601 rgd = globaldata_find(cpuid);
1602 lwkt_setcpu_self(rgd);
1603 }
1604
1605 /*
1606 * Remote IPI for cpu migration (called while in a critical section so we
1607 * do not have to enter another one).
1608 *
1609 * The thread (td) has already been completely descheduled from the
1610 * originating cpu and we can simply assert the case. The thread is
1611 * assigned to the new cpu and enqueued.
1612 *
1613 * The thread will re-add itself to tdallq when it resumes execution.
1614 */
1615 static void
1616 lwkt_setcpu_remote(void *arg)
1617 {
1618 thread_t td = arg;
1619 globaldata_t gd = mycpu;
1620
1621 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
1622 td->td_gd = gd;
1623 cpu_mfence();
1624 td->td_flags &= ~TDF_MIGRATING;
1625 KKASSERT(td->td_migrate_gd == NULL);
1626 KKASSERT(td->td_lwp == NULL ||
1627 (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1628 _lwkt_enqueue(td);
1629 }
1630
1631 struct lwp *
1632 lwkt_preempted_proc(void)
1633 {
1634 thread_t td = curthread;
1635 while (td->td_preempted)
1636 td = td->td_preempted;
1637 return(td->td_lwp);
1638 }
1639
1640 /*
1641 * Create a kernel process/thread/whatever. It shares its address space
1642 * with proc0 - ie: kernel only.
1643 *
1644 * If the cpu is not specified one will be selected. In the future
1645 * specifying a cpu of -1 will enable kernel thread migration between
1646 * cpus.
1647 */
1648 int
1649 lwkt_create(void (*func)(void *), void *arg, struct thread **tdp,
1650 thread_t template, int tdflags, int cpu, const char *fmt, ...)
1651 {
1652 thread_t td;
1653 __va_list ap;
1654
1655 td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
1656 tdflags);
1657 if (tdp)
1658 *tdp = td;
1659 cpu_set_thread_handler(td, lwkt_exit, func, arg);
1660
1661 /*
1662 * Set up arg0 for 'ps' etc
1663 */
1664 __va_start(ap, fmt);
1665 kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
1666 __va_end(ap);
1667
1668 /*
1669 * Schedule the thread to run
1670 */
1671 if (td->td_flags & TDF_NOSTART)
1672 td->td_flags &= ~TDF_NOSTART;
1673 else
1674 lwkt_schedule(td);
1675 return 0;
1676 }
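
/*
 * Usage sketch (illustrative; "example_main" and "example_td" are
 * hypothetical names): a typical kernel daemon is created and started with
 *
 *	static struct thread *example_td;
 *
 *	static void
 *	example_main(void *arg)
 *	{
 *		... do work, tsleep(), etc ...
 *		lwkt_exit();
 *	}
 *
 *	lwkt_create(example_main, NULL, &example_td, NULL, 0, -1,
 *		    "exampled");
 *
 * Passing cpu == -1 lets lwkt_alloc_thread() pick the cpu via its
 * round-robin rotator.
 */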
1677
1678 /*
1679 * Destroy an LWKT thread. Warning! This function is not called when
1680 * a process exits; cpu_proc_exit() directly calls cpu_thread_exit() and
1681 * uses a different reaping mechanism.
1682 */
1683 void
1684 lwkt_exit(void)
1685 {
1686 thread_t td = curthread;
1687 thread_t std;
1688 globaldata_t gd;
1689
1690 /*
1691 * Do any cleanup that might block here
1692 */
1693 biosched_done(td);
1694 dsched_exit_thread(td);
1695
1696 /*
1697 * Get us into a critical section to interlock gd_freetd and loop
1698 * until we can get it freed.
1699 *
1700 * We have to cache the current td in gd_freetd because objcache_put()ing
1701 * it would rip it out from under us while our thread is still active.
1702 *
1703 * We are the current thread so of course our own TDF_RUNNING bit will
1704 * be set, so unlike the lwp reap code we don't wait for it to clear.
1705 */
1706 gd = mycpu;
1707 crit_enter_quick(td);
1708 for (;;) {
1709 if (td->td_refs) {
1710 tsleep(td, 0, "tdreap", 1);
1711 continue;
1712 }
1713 if ((std = gd->gd_freetd) != NULL) {
1714 KKASSERT((std->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
1715 gd->gd_freetd = NULL;
1716 objcache_put(thread_cache, std);
1717 continue;
1718 }
1719 break;
1720 }
1721
1722 /*
1723 * Remove thread resources from kernel lists and deschedule us for
1724 * the last time. We cannot block after this point or we may end
1725 * up with a stale td on the tsleepq.
1726 *
1727 * None of this may block, the critical section is the only thing
1728 * protecting tdallq and the only thing preventing new lwkt_hold()
1729 * thread refs now.
1730 */
1731 if (td->td_flags & TDF_TSLEEPQ)
1732 tsleep_remove(td);
1733 lwkt_deschedule_self(td);
1734 lwkt_remove_tdallq(td);
1735 KKASSERT(td->td_refs == 0);
1736
1737 /*
1738 * Final cleanup
1739 */
1740 KKASSERT(gd->gd_freetd == NULL);
1741 if (td->td_flags & TDF_ALLOCATED_THREAD)
1742 gd->gd_freetd = td;
1743 cpu_thread_exit();
1744 }
1745
1746 void
1747 lwkt_remove_tdallq(thread_t td)
1748 {
1749 KKASSERT(td->td_gd == mycpu);
1750 TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
1751 }
1752
1753 /*
1754 * Code reduction and branch prediction improvements. Call/return
1755 * overhead on modern cpus often degenerates into 0 cycles due to
1756 * the cpu's branch prediction hardware and return pc cache. We
1757 * can take advantage of this by not inlining medium-complexity
1758 * functions and we can also reduce the branch prediction impact
1759 * by collapsing perfectly predictable branches into a single
1760 * procedure instead of duplicating it.
1761 *
1762 * Is any of this noticeable? Probably not, so I'll take the
1763 * smaller code size.
1764 */
1765 void
1766 crit_exit_wrapper(__DEBUG_CRIT_ARG__)
1767 {
1768 _crit_exit(mycpu __DEBUG_CRIT_PASS_ARG__);
1769 }
1770
1771 void
1772 crit_panic(void)
1773 {
1774 thread_t td = curthread;
1775 int lcrit = td->td_critcount;
1776
1777 td->td_critcount = 0;
1778 cpu_ccfence();
1779 panic("td_critcount is/would-go negative! %p %d", td, lcrit);
1780 /* NOT REACHED */
1781 }
1782
1783 /*
1784 * Called from debugger/panic on cpus which have been stopped. We must still
1785 * process the IPIQ while stopped.
1786 *
1787 * If we are dumping also try to process any pending interrupts. This may
1788 * or may not work depending on the state of the cpu at the point it was
1789 * stopped.
1790 */
1791 void
1792 lwkt_smp_stopped(void)
1793 {
1794 globaldata_t gd = mycpu;
1795
1796 if (dumping) {
1797 lwkt_process_ipiq();
1798 --gd->gd_intr_nesting_level;
1799 splz();
1800 ++gd->gd_intr_nesting_level;
1801 } else {
1802 lwkt_process_ipiq();
1803 }
1804 cpu_smp_stopped();
1805 }
1806