xref: /dragonfly/sys/kern/lwkt_thread.c (revision 52f9f0d9)
1 /*
2  * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /*
36  * Each cpu in a system has its own self-contained lightweight kernel
37  * thread scheduler, which means that generally speaking we only need
38  * to use a critical section to avoid problems.  Foreign thread
39  * scheduling is queued via (async) IPIs.
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/proc.h>
46 #include <sys/rtprio.h>
47 #include <sys/kinfo.h>
48 #include <sys/queue.h>
49 #include <sys/sysctl.h>
50 #include <sys/kthread.h>
51 #include <machine/cpu.h>
52 #include <sys/lock.h>
53 #include <sys/caps.h>
54 #include <sys/spinlock.h>
55 #include <sys/ktr.h>
56 
57 #include <sys/thread2.h>
58 #include <sys/spinlock2.h>
59 #include <sys/mplock2.h>
60 
61 #include <sys/dsched.h>
62 
63 #include <vm/vm.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_object.h>
67 #include <vm/vm_page.h>
68 #include <vm/vm_map.h>
69 #include <vm/vm_pager.h>
70 #include <vm/vm_extern.h>
71 
72 #include <machine/stdarg.h>
73 #include <machine/smp.h>
74 
75 #if !defined(KTR_CTXSW)
76 #define KTR_CTXSW KTR_ALL
77 #endif
78 KTR_INFO_MASTER(ctxsw);
79 KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p", int cpu, struct thread *td);
80 KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p", int cpu, struct thread *td);
81 KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s", struct thread *td, char *comm);
82 KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = <dead>", struct thread *td);
83 
84 static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");
85 
86 #ifdef	INVARIANTS
87 static int panic_on_cscount = 0;
88 #endif
89 static __int64_t switch_count = 0;
90 static __int64_t preempt_hit = 0;
91 static __int64_t preempt_miss = 0;
92 static __int64_t preempt_weird = 0;
93 static __int64_t token_contention_count[TDPRI_MAX+1] __debugvar;
94 static int lwkt_use_spin_port;
95 static struct objcache *thread_cache;
96 
97 #ifdef SMP
98 static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
99 static void lwkt_setcpu_remote(void *arg);
100 #endif
101 
102 extern void cpu_heavy_restore(void);
103 extern void cpu_lwkt_restore(void);
104 extern void cpu_kthread_restore(void);
105 extern void cpu_idle_restore(void);
106 
107 /*
108  * We can make all thread ports use the spin backend instead of the thread
109  * backend.  This should only be set to debug the spin backend.
110  */
111 TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);
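/*
 * For example (sketch, not required for normal operation), this tunable
 * would typically be set from the boot loader, e.g. in /boot/loader.conf:
 *
 *	lwkt.use_spin_port="1"
 */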
112 
113 #ifdef	INVARIANTS
114 SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0,
115     "Panic if attempting to switch lwkt's while mastering cpusync");
116 #endif
117 SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0,
118     "Number of switched threads");
119 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0,
120     "Successful preemption events");
121 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0,
122     "Failed preemption events");
123 SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0,
124     "Number of weird preemption events");
125 #ifdef	INVARIANTS
126 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_00, CTLFLAG_RW,
127 	&token_contention_count[0], 0, "spinning due to token contention");
128 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_01, CTLFLAG_RW,
129 	&token_contention_count[1], 0, "spinning due to token contention");
130 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_02, CTLFLAG_RW,
131 	&token_contention_count[2], 0, "spinning due to token contention");
132 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_03, CTLFLAG_RW,
133 	&token_contention_count[3], 0, "spinning due to token contention");
134 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_04, CTLFLAG_RW,
135 	&token_contention_count[4], 0, "spinning due to token contention");
136 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_05, CTLFLAG_RW,
137 	&token_contention_count[5], 0, "spinning due to token contention");
138 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_06, CTLFLAG_RW,
139 	&token_contention_count[6], 0, "spinning due to token contention");
140 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_07, CTLFLAG_RW,
141 	&token_contention_count[7], 0, "spinning due to token contention");
142 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_08, CTLFLAG_RW,
143 	&token_contention_count[8], 0, "spinning due to token contention");
144 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_09, CTLFLAG_RW,
145 	&token_contention_count[9], 0, "spinning due to token contention");
146 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_10, CTLFLAG_RW,
147 	&token_contention_count[10], 0, "spinning due to token contention");
148 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_11, CTLFLAG_RW,
149 	&token_contention_count[11], 0, "spinning due to token contention");
150 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_12, CTLFLAG_RW,
151 	&token_contention_count[12], 0, "spinning due to token contention");
152 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_13, CTLFLAG_RW,
153 	&token_contention_count[13], 0, "spinning due to token contention");
154 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_14, CTLFLAG_RW,
155 	&token_contention_count[14], 0, "spinning due to token contention");
156 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_15, CTLFLAG_RW,
157 	&token_contention_count[15], 0, "spinning due to token contention");
158 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_16, CTLFLAG_RW,
159 	&token_contention_count[16], 0, "spinning due to token contention");
160 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_17, CTLFLAG_RW,
161 	&token_contention_count[17], 0, "spinning due to token contention");
162 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_18, CTLFLAG_RW,
163 	&token_contention_count[18], 0, "spinning due to token contention");
164 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_19, CTLFLAG_RW,
165 	&token_contention_count[19], 0, "spinning due to token contention");
166 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_20, CTLFLAG_RW,
167 	&token_contention_count[20], 0, "spinning due to token contention");
168 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_21, CTLFLAG_RW,
169 	&token_contention_count[21], 0, "spinning due to token contention");
170 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_22, CTLFLAG_RW,
171 	&token_contention_count[22], 0, "spinning due to token contention");
172 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_23, CTLFLAG_RW,
173 	&token_contention_count[23], 0, "spinning due to token contention");
174 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_24, CTLFLAG_RW,
175 	&token_contention_count[24], 0, "spinning due to token contention");
176 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_25, CTLFLAG_RW,
177 	&token_contention_count[25], 0, "spinning due to token contention");
178 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_26, CTLFLAG_RW,
179 	&token_contention_count[26], 0, "spinning due to token contention");
180 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_27, CTLFLAG_RW,
181 	&token_contention_count[27], 0, "spinning due to token contention");
182 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_28, CTLFLAG_RW,
183 	&token_contention_count[28], 0, "spinning due to token contention");
184 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_29, CTLFLAG_RW,
185 	&token_contention_count[29], 0, "spinning due to token contention");
186 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_30, CTLFLAG_RW,
187 	&token_contention_count[30], 0, "spinning due to token contention");
188 SYSCTL_QUAD(_lwkt, OID_AUTO, token_contention_count_31, CTLFLAG_RW,
189 	&token_contention_count[31], 0, "spinning due to token contention");
190 #endif
191 static int fairq_enable = 0;
192 SYSCTL_INT(_lwkt, OID_AUTO, fairq_enable, CTLFLAG_RW,
193 	&fairq_enable, 0, "Turn on fairq priority accumulators");
194 static int fairq_bypass = -1;
195 SYSCTL_INT(_lwkt, OID_AUTO, fairq_bypass, CTLFLAG_RW,
196 	&fairq_bypass, 0, "Allow fairq to bypass td on token failure");
197 extern int lwkt_sched_debug;
198 int lwkt_sched_debug = 0;
199 SYSCTL_INT(_lwkt, OID_AUTO, sched_debug, CTLFLAG_RW,
200 	&lwkt_sched_debug, 0, "Scheduler debug");
201 static int lwkt_spin_loops = 10;
202 SYSCTL_INT(_lwkt, OID_AUTO, spin_loops, CTLFLAG_RW,
203 	&lwkt_spin_loops, 0, "Scheduler spin loops until sorted decon");
204 static int lwkt_spin_reseq = 0;
205 SYSCTL_INT(_lwkt, OID_AUTO, spin_reseq, CTLFLAG_RW,
206 	&lwkt_spin_reseq, 0, "Scheduler resequencer enable");
207 static int lwkt_spin_monitor = 0;
208 SYSCTL_INT(_lwkt, OID_AUTO, spin_monitor, CTLFLAG_RW,
209 	&lwkt_spin_monitor, 0, "Scheduler uses monitor/mwait");
210 static int lwkt_spin_fatal = 0;	/* disabled */
211 SYSCTL_INT(_lwkt, OID_AUTO, spin_fatal, CTLFLAG_RW,
212 	&lwkt_spin_fatal, 0, "LWKT scheduler spin loops till fatal panic");
213 static int preempt_enable = 1;
214 SYSCTL_INT(_lwkt, OID_AUTO, preempt_enable, CTLFLAG_RW,
215 	&preempt_enable, 0, "Enable preemption");
216 static int lwkt_cache_threads = 0;
217 SYSCTL_INT(_lwkt, OID_AUTO, cache_threads, CTLFLAG_RD,
218 	&lwkt_cache_threads, 0, "thread+kstack cache");
219 
220 static __cachealign int lwkt_cseq_rindex;
221 static __cachealign int lwkt_cseq_windex;
222 
223 /*
224  * These helper procedures handle the runq; they can only be called from
225  * within a critical section.
226  *
227  * WARNING!  Prior to SMP being brought up it is possible to enqueue and
228  * dequeue threads belonging to other cpus, so be sure to use td->td_gd
229  * instead of 'mycpu' when referencing the globaldata structure.   Once
230  * SMP is live, enqueuing and dequeuing only occur on the current cpu.
231  */
232 static __inline
233 void
234 _lwkt_dequeue(thread_t td)
235 {
236     if (td->td_flags & TDF_RUNQ) {
237 	struct globaldata *gd = td->td_gd;
238 
239 	td->td_flags &= ~TDF_RUNQ;
240 	TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
241 	if (TAILQ_FIRST(&gd->gd_tdrunq) == NULL)
242 		atomic_clear_int(&gd->gd_reqflags, RQF_RUNNING);
243     }
244 }
245 
246 /*
247  * Priority enqueue.
248  *
249  * NOTE: There are a limited number of lwkt threads runnable since user
250  *	 processes only schedule one at a time per cpu.
251  */
252 static __inline
253 void
254 _lwkt_enqueue(thread_t td)
255 {
256     thread_t xtd;
257 
258     if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
259 	struct globaldata *gd = td->td_gd;
260 
261 	td->td_flags |= TDF_RUNQ;
262 	xtd = TAILQ_FIRST(&gd->gd_tdrunq);
263 	if (xtd == NULL) {
264 	    TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
265 	    atomic_set_int(&gd->gd_reqflags, RQF_RUNNING);
266 	} else {
267 	    while (xtd && xtd->td_pri >= td->td_pri)
268 		xtd = TAILQ_NEXT(xtd, td_threadq);
269 	    if (xtd)
270 		TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
271 	    else
272 		TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
273 	}
274 
275 	/*
276 	 * Request a LWKT reschedule if we are now at the head of the queue.
277 	 */
278 	if (TAILQ_FIRST(&gd->gd_tdrunq) == td)
279 	    need_lwkt_resched();
280     }
281 }
282 
283 static __boolean_t
284 _lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
285 {
286 	struct thread *td = (struct thread *)obj;
287 
288 	td->td_kstack = NULL;
289 	td->td_kstack_size = 0;
290 	td->td_flags = TDF_ALLOCATED_THREAD;
291 	td->td_mpflags = 0;
292 	return (1);
293 }
294 
295 static void
296 _lwkt_thread_dtor(void *obj, void *privdata)
297 {
298 	struct thread *td = (struct thread *)obj;
299 
300 	KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
301 	    ("_lwkt_thread_dtor: not allocated from objcache"));
302 	KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
303 		td->td_kstack_size > 0,
304 	    ("_lwkt_thread_dtor: corrupted stack"));
305 	kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
306 	td->td_kstack = NULL;
307 	td->td_flags = 0;
308 }
309 
310 /*
311  * Initialize the lwkt subsystem.
312  *
313  * Nominally cache up to 32 thread + kstack structures.  Cache more on
314  * systems with a lot of cpu cores.
315  */
316 void
317 lwkt_init(void)
318 {
319     TUNABLE_INT("lwkt.cache_threads", &lwkt_cache_threads);
320     if (lwkt_cache_threads == 0) {
321 	lwkt_cache_threads = ncpus * 4;
322 	if (lwkt_cache_threads < 32)
323 	    lwkt_cache_threads = 32;
324     }
325     thread_cache = objcache_create_mbacked(
326 				M_THREAD, sizeof(struct thread),
327 				NULL, lwkt_cache_threads,
328 				_lwkt_thread_ctor, _lwkt_thread_dtor, NULL);
329 }
330 
331 /*
332  * Schedule a thread to run.  As the current thread we can always safely
333  * schedule ourselves, and a shortcut procedure is provided for that
334  * purpose.
335  *
336  * (non-blocking, self contained on a per cpu basis)
337  */
338 void
339 lwkt_schedule_self(thread_t td)
340 {
341     KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
342     crit_enter_quick(td);
343     KASSERT(td != &td->td_gd->gd_idlethread,
344 	    ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
345     KKASSERT(td->td_lwp == NULL ||
346 	     (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
347     _lwkt_enqueue(td);
348     crit_exit_quick(td);
349 }
350 
351 /*
352  * Deschedule a thread.
353  *
354  * (non-blocking, self contained on a per cpu basis)
355  */
356 void
357 lwkt_deschedule_self(thread_t td)
358 {
359     crit_enter_quick(td);
360     _lwkt_dequeue(td);
361     crit_exit_quick(td);
362 }
363 
364 /*
365  * LWKTs operate on a per-cpu basis
366  *
367  * WARNING!  Called from early boot, 'mycpu' may not work yet.
368  */
369 void
370 lwkt_gdinit(struct globaldata *gd)
371 {
372     TAILQ_INIT(&gd->gd_tdrunq);
373     TAILQ_INIT(&gd->gd_tdallq);
374 }
375 
376 /*
377  * Create a new thread.  The thread must be associated with a process context
378  * or LWKT start address before it can be scheduled.  If the target cpu is
379  * -1 a cpu will be chosen for it in round-robin fashion.
380  *
381  * If you intend to create a thread without a process context this function
382  * does everything except load the startup and switcher function.
383  */
384 thread_t
385 lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
386 {
387     static int cpu_rotator;
388     globaldata_t gd = mycpu;
389     void *stack;
390 
391     /*
392      * If static thread storage is not supplied allocate a thread.  Reuse
393      * a cached free thread if possible.  gd_freetd is used to keep an exiting
394      * thread intact through the exit.
395      */
396     if (td == NULL) {
397 	crit_enter_gd(gd);
398 	if ((td = gd->gd_freetd) != NULL) {
399 	    KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
400 				      TDF_RUNQ)) == 0);
401 	    gd->gd_freetd = NULL;
402 	} else {
403 	    td = objcache_get(thread_cache, M_WAITOK);
404 	    KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
405 				      TDF_RUNQ)) == 0);
406 	}
407 	crit_exit_gd(gd);
408     	KASSERT((td->td_flags &
409 		 (TDF_ALLOCATED_THREAD|TDF_RUNNING|TDF_PREEMPT_LOCK)) ==
410 		 TDF_ALLOCATED_THREAD,
411 		("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags));
412     	flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
413     }
414 
415     /*
416      * Try to reuse cached stack.
417      */
418     if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
419 	if (flags & TDF_ALLOCATED_STACK) {
420 	    kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size);
421 	    stack = NULL;
422 	}
423     }
424     if (stack == NULL) {
425 	stack = (void *)kmem_alloc_stack(&kernel_map, stksize);
426 	flags |= TDF_ALLOCATED_STACK;
427     }
428     if (cpu < 0) {
429 	cpu = ++cpu_rotator;
430 	cpu_ccfence();
431 	cpu %= ncpus;
432     }
433     lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
434     return(td);
435 }
436 
437 /*
438  * Initialize a preexisting thread structure.  This function is used by
439  * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
440  *
441  * All threads start out in a critical section at a priority of
442  * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
443  * appropriate.  This function may send an IPI message when the
444  * requested cpu is not the current cpu and consequently gd_tdallq may
445  * not be initialized synchronously from the point of view of the originating
446  * cpu.
447  *
448  * NOTE! We have to be careful with regard to creating threads for other cpus
449  * if SMP has not yet been activated.
450  */
451 #ifdef SMP
452 
453 static void
454 lwkt_init_thread_remote(void *arg)
455 {
456     thread_t td = arg;
457 
458     /*
459      * Protected by critical section held by IPI dispatch
460      */
461     TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
462 }
463 
464 #endif
465 
466 /*
467  * lwkt core thread structural initialization.
468  *
469  * NOTE: All threads are initialized as mpsafe threads.
470  */
471 void
472 lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
473 		struct globaldata *gd)
474 {
475     globaldata_t mygd = mycpu;
476 
477     bzero(td, sizeof(struct thread));
478     td->td_kstack = stack;
479     td->td_kstack_size = stksize;
480     td->td_flags = flags;
481     td->td_mpflags = 0;
482     td->td_gd = gd;
483     td->td_pri = TDPRI_KERN_DAEMON;
484     td->td_critcount = 1;
485     td->td_toks_have = NULL;
486     td->td_toks_stop = &td->td_toks_base;
487     if (lwkt_use_spin_port || (flags & TDF_FORCE_SPINPORT))
488 	lwkt_initport_spin(&td->td_msgport);
489     else
490 	lwkt_initport_thread(&td->td_msgport, td);
491     pmap_init_thread(td);
492 #ifdef SMP
493     /*
494      * Normally initializing a thread for a remote cpu requires sending an
495      * IPI.  However, the idlethread is setup before the other cpus are
496      * IPI.  However, the idlethread is set up before the other cpus are
497      * of gd_tdallq requires the BGL.
498      */
499     if (gd == mygd || td == &gd->gd_idlethread) {
500 	crit_enter_gd(mygd);
501 	TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
502 	crit_exit_gd(mygd);
503     } else {
504 	lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
505     }
506 #else
507     crit_enter_gd(mygd);
508     TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
509     crit_exit_gd(mygd);
510 #endif
511 
512     dsched_new_thread(td);
513 }
514 
515 void
516 lwkt_set_comm(thread_t td, const char *ctl, ...)
517 {
518     __va_list va;
519 
520     __va_start(va, ctl);
521     kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
522     __va_end(va);
523     KTR_LOG(ctxsw_newtd, td, td->td_comm);
524 }
525 
526 /*
527  * Prevent the thread from getting destroyed.  Note that unlike PHOLD/PRELE
528  * this does not prevent the thread from migrating to another cpu so the
529  * gd_tdallq state is not protected by this.
530  */
531 void
532 lwkt_hold(thread_t td)
533 {
534     atomic_add_int(&td->td_refs, 1);
535 }
536 
537 void
538 lwkt_rele(thread_t td)
539 {
540     KKASSERT(td->td_refs > 0);
541     atomic_add_int(&td->td_refs, -1);
542 }
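
/*
 * Illustrative usage sketch: bracket a window in which the thread must not
 * be destroyed with a hold/rele pair.  As noted above, this does not pin
 * the thread to its current cpu.
 *
 *	lwkt_hold(td);
 *	... examine or manipulate td ...
 *	lwkt_rele(td);
 */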
543 
544 void
545 lwkt_free_thread(thread_t td)
546 {
547     KKASSERT(td->td_refs == 0);
548     KKASSERT((td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK |
549 			      TDF_RUNQ | TDF_TSLEEPQ)) == 0);
550     if (td->td_flags & TDF_ALLOCATED_THREAD) {
551     	objcache_put(thread_cache, td);
552     } else if (td->td_flags & TDF_ALLOCATED_STACK) {
553 	/* client-allocated struct with internally allocated stack */
554 	KASSERT(td->td_kstack && td->td_kstack_size > 0,
555 	    ("lwkt_free_thread: corrupted stack"));
556 	kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
557 	td->td_kstack = NULL;
558 	td->td_kstack_size = 0;
559     }
560     KTR_LOG(ctxsw_deadtd, td);
561 }
562 
563 
564 /*
565  * Switch to the next runnable lwkt.  If no LWKTs are runnable then
566  * switch to the idlethread.  Switching must occur within a critical
567  * section to avoid races with the scheduling queue.
568  *
569  * We always have full control over our cpu's run queue.  Other cpus
570  * that wish to manipulate our queue must use the cpu_*msg() calls to
571  * talk to our cpu, so a critical section is all that is needed and
572  * the result is very, very fast thread switching.
573  *
574  * The LWKT scheduler uses a fixed priority model and round-robins at
575  * each priority level.  User process scheduling is a totally
576  * different beast and LWKT priorities should not be confused with
577  * user process priorities.
578  *
579  * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
580  * is not called by the current thread in the preemption case, only when
581  * the preempting thread blocks (in order to return to the original thread).
582  *
583  * SPECIAL NOTE ON SWITCH ATOMICITY: Certain operations such as thread
584  * migration and tsleep deschedule the current lwkt thread and call
585  * lwkt_switch().  In particular, the target cpu of the migration fully
586  * expects the thread to become non-runnable and can deadlock against
587  * cpusync operations if we run any IPIs prior to switching the thread out.
588  *
589  * WE MUST BE VERY CAREFUL NOT TO RUN SPLZ DIRECTLY OR INDIRECTLY IF
590  * THE CURRENT THREAD HAS BEEN DESCHEDULED!
591  */
592 void
593 lwkt_switch(void)
594 {
595     globaldata_t gd = mycpu;
596     thread_t td = gd->gd_curthread;
597     thread_t ntd;
598     int spinning = 0;
599 
600     KKASSERT(gd->gd_processing_ipiq == 0);
601     KKASSERT(td->td_flags & TDF_RUNNING);
602 
603     /*
604      * Switching from within a 'fast' (non-thread-switched) interrupt or IPI
605      * is illegal.  However, we may have to do it anyway if we hit a fatal
606      * kernel trap or we have panicked.
607      *
608      * If this case occurs save and restore the interrupt nesting level.
609      */
610     if (gd->gd_intr_nesting_level) {
611 	int savegdnest;
612 	int savegdtrap;
613 
614 	if (gd->gd_trap_nesting_level == 0 && panic_cpu_gd != mycpu) {
615 	    panic("lwkt_switch: Attempt to switch from a "
616 		  "fast interrupt, ipi, or hard code section, "
617 		  "td %p\n",
618 		  td);
619 	} else {
620 	    savegdnest = gd->gd_intr_nesting_level;
621 	    savegdtrap = gd->gd_trap_nesting_level;
622 	    gd->gd_intr_nesting_level = 0;
623 	    gd->gd_trap_nesting_level = 0;
624 	    if ((td->td_flags & TDF_PANICWARN) == 0) {
625 		td->td_flags |= TDF_PANICWARN;
626 		kprintf("Warning: thread switch from interrupt, IPI, "
627 			"or hard code section.\n"
628 			"thread %p (%s)\n", td, td->td_comm);
629 		print_backtrace(-1);
630 	    }
631 	    lwkt_switch();
632 	    gd->gd_intr_nesting_level = savegdnest;
633 	    gd->gd_trap_nesting_level = savegdtrap;
634 	    return;
635 	}
636     }
637 
638     /*
639      * Release our current user process designation if we are blocking
640      * or if a user reschedule was requested.
641      *
642      * NOTE: This function is NOT called if we are switching into or
643      *	     returning from a preemption.
644      *
645      * NOTE: Releasing our current user process designation may cause
646      *	     it to be assigned to another thread, which in turn will
647      *	     cause us to block in the usched acquire code when we attempt
648      *	     to return to userland.
649      *
650      * NOTE: On SMP systems this can be very nasty when heavy token
651      *	     contention is present so we want to be careful not to
652      *	     release the designation gratuitously.
653      */
654     if (td->td_release &&
655 	(user_resched_wanted() || (td->td_flags & TDF_RUNQ) == 0)) {
656 	    td->td_release(td);
657     }
658 
659     /*
660      * Release all tokens
661      */
662     crit_enter_gd(gd);
663     if (TD_TOKS_HELD(td))
664 	    lwkt_relalltokens(td);
665 
666     /*
667      * We had better not be holding any spin locks, but don't get into an
668      * endless panic loop.
669      */
670     KASSERT(gd->gd_spinlocks_wr == 0 || panicstr != NULL,
671 	    ("lwkt_switch: still holding %d exclusive spinlocks!",
672 	     gd->gd_spinlocks_wr));
673 
674 
675 #ifdef SMP
676 #ifdef	INVARIANTS
677     if (td->td_cscount) {
678 	kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
679 		td);
680 	if (panic_on_cscount)
681 	    panic("switching while mastering cpusync");
682     }
683 #endif
684 #endif
685 
686     /*
687      * If we had preempted another thread on this cpu, resume the preempted
688      * thread.  This occurs transparently, whether the preempted thread
689      * was scheduled or not (it may have been preempted after descheduling
690      * itself).
691      *
692      * We have to setup the MP lock for the original thread after backing
693      * out the adjustment that was made to curthread when the original
694      * was preempted.
695      */
696     if ((ntd = td->td_preempted) != NULL) {
697 	KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
698 	ntd->td_flags |= TDF_PREEMPT_DONE;
699 
700 	/*
701 	 * The interrupt may have woken a thread up; we need to properly
702 	 * set the reschedule flag if the originally interrupted thread is
703 	 * at a lower priority.
704 	 *
705 	 * The interrupt may not have descheduled.
706 	 */
707 	if (TAILQ_FIRST(&gd->gd_tdrunq) != ntd)
708 	    need_lwkt_resched();
709 	goto havethread_preempted;
710     }
711 
712     /*
713      * If we cannot obtain ownership of the tokens we cannot immediately
714      * schedule the target thread.
715      *
716      * Reminder: Again, we cannot afford to run any IPIs in this path if
717      * the current thread has been descheduled.
718      */
719     for (;;) {
720 	clear_lwkt_resched();
721 
722 	/*
723 	 * Hotpath - pull the head of the run queue and attempt to schedule
724 	 * it.
725 	 */
726 	for (;;) {
727 	    ntd = TAILQ_FIRST(&gd->gd_tdrunq);
728 
729 	    if (ntd == NULL) {
730 		/*
731 		 * Runq is empty, switch to idle to allow it to halt.
732 		 */
733 		ntd = &gd->gd_idlethread;
734 #ifdef SMP
735 		if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
736 		    ASSERT_NO_TOKENS_HELD(ntd);
737 #endif
738 		cpu_time.cp_msg[0] = 0;
739 		cpu_time.cp_stallpc = 0;
740 		goto haveidle;
741 	    }
742 	    break;
743 	}
744 
745 	/*
746 	 * Hotpath - schedule ntd.
747 	 *
748 	 * NOTE: For UP there is no mplock and lwkt_getalltokens()
749 	 *	     always succeeds.
750 	 */
751 	if (TD_TOKS_NOT_HELD(ntd) ||
752 	    lwkt_getalltokens(ntd, (spinning >= lwkt_spin_loops)))
753 	{
754 	    goto havethread;
755 	}
756 
757 	/*
758 	 * Coldpath (SMP only since tokens always succeed on UP)
759 	 *
760 	 * We had some contention on the thread we wanted to schedule.
761 	 * What we do now is try to find a thread that we can schedule
762 	 * in its stead.
763 	 *
764 	 * The coldpath scan does NOT rearrange threads in the run list.
765 	 * The lwkt_schedulerclock() will assert need_lwkt_resched() on
766 	 * the next tick whenever the current head is not the current thread.
767 	 */
768 #ifdef	INVARIANTS
769 	++token_contention_count[ntd->td_pri];
770 	++ntd->td_contended;
771 #endif
772 
773 	if (fairq_bypass > 0)
774 		goto skip;
775 
776 	while ((ntd = TAILQ_NEXT(ntd, td_threadq)) != NULL) {
777 		/*
778 		 * Never schedule threads returning to userland or the
779 		 * user thread scheduler helper thread when higher priority
780 		 * threads are present.
781 		 */
782 		if (ntd->td_pri < TDPRI_KERN_LPSCHED) {
783 			ntd = NULL;
784 			break;
785 		}
786 
787 		/*
788 		 * Try this one.
789 		 */
790 		if (TD_TOKS_NOT_HELD(ntd) ||
791 		    lwkt_getalltokens(ntd, (spinning >= lwkt_spin_loops))) {
792 			goto havethread;
793 		}
794 #ifdef	INVARIANTS
795 		++token_contention_count[ntd->td_pri];
796 		++ntd->td_contended;
797 #endif
798 	}
799 
800 skip:
801 	/*
802 	 * We exhausted the run list, meaning that all runnable threads
803 	 * are contested.
804 	 */
805 	cpu_pause();
806 	ntd = &gd->gd_idlethread;
807 #ifdef SMP
808 	if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
809 	    ASSERT_NO_TOKENS_HELD(ntd);
810 	/* contention case, do not clear contention mask */
811 #endif
812 
813 	/*
814 	 * We are going to have to retry but if the current thread is not
815 	 * on the runq we instead switch through the idle thread to get away
816 	 * from the current thread.  We have to flag for lwkt reschedule
817 	 * to prevent the idle thread from halting.
818 	 *
819 	 * NOTE: A non-zero spinning is passed to lwkt_getalltokens() to
820 	 *	 instruct it to deal with the potential for deadlocks by
821 	 *	 ordering the tokens by address.
822 	 */
823 	if ((td->td_flags & TDF_RUNQ) == 0) {
824 	    need_lwkt_resched();	/* prevent hlt */
825 	    goto haveidle;
826 	}
827 #if defined(INVARIANTS) && defined(__amd64__)
828 	if ((read_rflags() & PSL_I) == 0) {
829 		cpu_enable_intr();
830 		panic("lwkt_switch() called with interrupts disabled");
831 	}
832 #endif
833 
834 	/*
835 	 * Number of iterations so far.  After a certain point we switch to
836 	 * a sorted-address/monitor/mwait version of lwkt_getalltokens()
837 	 */
838 	if (spinning < 0x7FFFFFFF)
839 	    ++spinning;
840 
841 #ifdef SMP
842 	/*
843 	 * lwkt_getalltokens() failed in sorted token mode, we can use
844 	 * monitor/mwait in this case.
845 	 */
846 	if (spinning >= lwkt_spin_loops &&
847 	    (cpu_mi_feature & CPU_MI_MONITOR) &&
848 	    lwkt_spin_monitor)
849 	{
850 	    cpu_mmw_pause_int(&gd->gd_reqflags,
851 			      (gd->gd_reqflags | RQF_SPINNING) &
852 			      ~RQF_IDLECHECK_WK_MASK);
853 	}
854 #endif
855 
856 	/*
857 	 * We already checked that td is still scheduled so this should be
858 	 * safe.
859 	 */
860 	splz_check();
861 
862 	/*
863 	 * This experimental resequencer is used as a fall-back to reduce
864 	 * hw cache line contention by placing each core's scheduler into a
865 	 * time-domain-multiplexed slot.
866 	 *
867 	 * The resequencer is disabled by default.  Its functionality has
868 	 * largely been superseded by the token algorithm, which limits races
869 	 * to a subset of cores.
870 	 *
871 	 * The resequencer algorithm tends to break down when more than
872 	 * 20 cores are contending.  What appears to happen is that new
873 	 * tokens can be obtained out of address-sorted order by new cores
874 	 * while existing cores languish in long delays between retries and
875 	 * wind up being starved-out of the token acquisition.
876 	 */
877 	if (lwkt_spin_reseq && spinning >= lwkt_spin_reseq) {
878 	    int cseq = atomic_fetchadd_int(&lwkt_cseq_windex, 1);
879 	    int oseq;
880 
881 	    while ((oseq = lwkt_cseq_rindex) != cseq) {
882 		cpu_ccfence();
883 #if 1
884 		if (cpu_mi_feature & CPU_MI_MONITOR) {
885 		    cpu_mmw_pause_int(&lwkt_cseq_rindex, oseq);
886 		} else {
887 #endif
888 		    cpu_pause();
889 		    cpu_lfence();
890 #if 1
891 		}
892 #endif
893 	    }
894 	    DELAY(1);
895 	    atomic_add_int(&lwkt_cseq_rindex, 1);
896 	}
897 	/* highest level for(;;) loop */
898     }
899 
900 havethread:
901     /*
902      * Clear gd_idle_repeat when doing a normal switch to a non-idle
903      * thread.
904      */
905     ntd->td_wmesg = NULL;
906     ++gd->gd_cnt.v_swtch;
907     gd->gd_idle_repeat = 0;
908 
909 havethread_preempted:
910     /*
911      * If the new target does not need the MP lock and we are holding it,
912      * release the MP lock.  If the new target requires the MP lock we have
913      * already acquired it for the target.
914      */
915     ;
916 haveidle:
917     KASSERT(ntd->td_critcount,
918 	    ("priority problem in lwkt_switch %d %d",
919 	    td->td_critcount, ntd->td_critcount));
920 
921     if (td != ntd) {
922 	/*
923 	 * Execute the actual thread switch operation.  This function
924 	 * returns to the current thread and returns the previous thread
925 	 * (which may be different from the thread we switched to).
926 	 *
927 	 * We are responsible for marking ntd as TDF_RUNNING.
928 	 */
929 	KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
930 	++switch_count;
931 	KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd);
932 	ntd->td_flags |= TDF_RUNNING;
933 	lwkt_switch_return(td->td_switch(ntd));
934 	/* ntd invalid, td_switch() can return a different thread_t */
935     }
936 
937     /*
938      * catch-all.  XXX is this strictly needed?
939      */
940     splz_check();
941 
942     /* NOTE: current cpu may have changed after switch */
943     crit_exit_quick(td);
944 }
945 
946 /*
947  * Called by assembly in the td_switch (thread restore path) for thread
948  * bootstrap cases which do not 'return' to lwkt_switch().
949  */
950 void
951 lwkt_switch_return(thread_t otd)
952 {
953 #ifdef SMP
954 	globaldata_t rgd;
955 
956 	/*
957 	 * Check if otd was migrating.  Now that we are on ntd we can finish
958 	 * up the migration.  This is a bit messy but it is the only place
959 	 * where otd is known to be fully descheduled.
960 	 *
961 	 * We can only activate the migration if otd was migrating but not
962 	 * held on the cpu due to a preemption chain.  We still have to
963 	 * clear TDF_RUNNING on the old thread either way.
964 	 *
965 	 * We are responsible for clearing the previously running thread's
966 	 * TDF_RUNNING.
967 	 */
968 	if ((rgd = otd->td_migrate_gd) != NULL &&
969 	    (otd->td_flags & TDF_PREEMPT_LOCK) == 0) {
970 		KKASSERT((otd->td_flags & (TDF_MIGRATING | TDF_RUNNING)) ==
971 			 (TDF_MIGRATING | TDF_RUNNING));
972 		otd->td_migrate_gd = NULL;
973 		otd->td_flags &= ~TDF_RUNNING;
974 		lwkt_send_ipiq(rgd, lwkt_setcpu_remote, otd);
975 	} else {
976 		otd->td_flags &= ~TDF_RUNNING;
977 	}
978 #else
979 	otd->td_flags &= ~TDF_RUNNING;
980 #endif
981 }
982 
983 /*
984  * Request that the target thread preempt the current thread.  Preemption
985  * can only occur if our only critical section is the one that we were called
986  * with, the relative priority of the target thread is higher, and the target
987  * thread holds no tokens.  This also only works if we are not holding any
988  * spinlocks (obviously).
989  *
990  * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
991  * this is called via lwkt_schedule() through the td_preemptable callback.
992  * critcount is the managed critical priority that we should ignore in order
993  * to determine whether preemption is possible (aka usually just the crit
994  * priority of lwkt_schedule() itself).
995  *
996  * Preemption is typically limited to interrupt threads.
997  *
998  * Operation works in a fairly straightforward manner.  The normal
999  * scheduling code is bypassed and we switch directly to the target
1000  * thread.  When the target thread attempts to block or switch away
1001  * code at the base of lwkt_switch() will switch directly back to our
1002  * thread.  Our thread is able to retain whatever tokens it holds and
1003  * if the target needs one of them the target will switch back to us
1004  * and reschedule itself normally.
1005  */
1006 void
1007 lwkt_preempt(thread_t ntd, int critcount)
1008 {
1009     struct globaldata *gd = mycpu;
1010     thread_t xtd;
1011     thread_t td;
1012     int save_gd_intr_nesting_level;
1013 
1014     /*
1015      * The caller has put us in a critical section.  We can only preempt
1016      * if the caller of the caller was not in a critical section (basically
1017      * a local interrupt), as determined by the 'critcount' parameter.  We
1018      * also can't preempt if the caller is holding any spinlocks (even if
1019      * it isn't in a critical section).  This also handles the tokens test.
1020      *
1021      * YYY The target thread must be in a critical section (else it must
1022      * inherit our critical section?  I dunno yet).
1023      */
1024     KASSERT(ntd->td_critcount, ("BADCRIT0 %d", ntd->td_pri));
1025 
1026     td = gd->gd_curthread;
1027     if (preempt_enable == 0) {
1028 	++preempt_miss;
1029 	return;
1030     }
1031     if (ntd->td_pri <= td->td_pri) {
1032 	++preempt_miss;
1033 	return;
1034     }
1035     if (td->td_critcount > critcount) {
1036 	++preempt_miss;
1037 	return;
1038     }
1039 #ifdef SMP
1040     if (td->td_cscount) {
1041 	++preempt_miss;
1042 	return;
1043     }
1044     if (ntd->td_gd != gd) {
1045 	++preempt_miss;
1046 	return;
1047     }
1048 #endif
1049     /*
1050      * We don't have to check spinlocks here as they will also bump
1051      * td_critcount.
1052      *
1053      * Do not try to preempt if the target thread is holding any tokens.
1054      * We could try to acquire the tokens but this case is so rare there
1055      * is no need to support it.
1056      */
1057     KKASSERT(gd->gd_spinlocks_wr == 0);
1058 
1059     if (TD_TOKS_HELD(ntd)) {
1060 	++preempt_miss;
1061 	return;
1062     }
1063     if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
1064 	++preempt_weird;
1065 	return;
1066     }
1067     if (ntd->td_preempted) {
1068 	++preempt_hit;
1069 	return;
1070     }
1071     KKASSERT(gd->gd_processing_ipiq == 0);
1072 
1073     /*
1074      * Since we are able to preempt the current thread, there is no need to
1075      * call need_lwkt_resched().
1076      *
1077      * We must temporarily clear gd_intr_nesting_level around the switch
1078      * since switchouts from the target thread are allowed (they will just
1079      * return to our thread), and since the target thread has its own stack.
1080      *
1081      * A preemption must switch back to the original thread, assert the
1082      * case.
1083      */
1084     ++preempt_hit;
1085     ntd->td_preempted = td;
1086     td->td_flags |= TDF_PREEMPT_LOCK;
1087     KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd);
1088     save_gd_intr_nesting_level = gd->gd_intr_nesting_level;
1089     gd->gd_intr_nesting_level = 0;
1090 
1091     KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
1092     ntd->td_flags |= TDF_RUNNING;
1093     xtd = td->td_switch(ntd);
1094     KKASSERT(xtd == ntd);
1095     lwkt_switch_return(xtd);
1096     gd->gd_intr_nesting_level = save_gd_intr_nesting_level;
1097 
1098     KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
1099     ntd->td_preempted = NULL;
1100     td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
1101 }
1102 
1103 /*
1104  * Conditionally call splz() if gd_reqflags indicates work is pending.
1105  * This will work inside a critical section but not inside a hard code
1106  * section.
1107  *
1108  * (self contained on a per cpu basis)
1109  */
1110 void
1111 splz_check(void)
1112 {
1113     globaldata_t gd = mycpu;
1114     thread_t td = gd->gd_curthread;
1115 
1116     if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) &&
1117 	gd->gd_intr_nesting_level == 0 &&
1118 	td->td_nest_count < 2)
1119     {
1120 	splz();
1121     }
1122 }
1123 
1124 /*
1125  * This version is integrated into crit_exit; reqflags has already
1126  * been tested but td_critcount has not.
1127  *
1128  * We only want to execute the splz() on the 1->0 transition of
1129  * critcount and not in a hard code section or if too deeply nested.
1130  *
1131  * NOTE: gd->gd_spinlocks_wr is implied to be 0 when td_critcount is 0.
1132  */
1133 void
1134 lwkt_maybe_splz(thread_t td)
1135 {
1136     globaldata_t gd = td->td_gd;
1137 
1138     if (td->td_critcount == 0 &&
1139 	gd->gd_intr_nesting_level == 0 &&
1140 	td->td_nest_count < 2)
1141     {
1142 	splz();
1143     }
1144 }
1145 
1146 /*
1147  * Drivers which set up processing co-threads can call this function to
1148  * run the co-thread at a higher priority and to allow it to preempt
1149  * normal threads.
1150  */
1151 void
1152 lwkt_set_interrupt_support_thread(void)
1153 {
1154 	thread_t td = curthread;
1155 
1156         lwkt_setpri_self(TDPRI_INT_SUPPORT);
1157 	td->td_flags |= TDF_INTTHREAD;
1158 	td->td_preemptable = lwkt_preempt;
1159 }
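
/*
 * Illustrative sketch (hypothetical driver names): the driver creates its
 * co-thread with lwkt_create() and the co-thread promotes itself before
 * entering its processing loop:
 *
 *	static void
 *	mydrv_cothread(void *arg)
 *	{
 *		lwkt_set_interrupt_support_thread();
 *		for (;;) {
 *			... wait for and process work handed off by
 *			    the interrupt handler ...
 *		}
 *	}
 */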
1160 
1161 
1162 /*
1163  * This function is used to negotiate a passive release of the current
1164  * process/lwp designation with the user scheduler, allowing the user
1165  * scheduler to schedule another user thread.  The related kernel thread
1166  * (curthread) continues running in the released state.
1167  */
1168 void
1169 lwkt_passive_release(struct thread *td)
1170 {
1171     struct lwp *lp = td->td_lwp;
1172 
1173     td->td_release = NULL;
1174     lwkt_setpri_self(TDPRI_KERN_USER);
1175     lp->lwp_proc->p_usched->release_curproc(lp);
1176 }
1177 
1178 
1179 /*
1180  * This implements a LWKT yield, allowing a kernel thread to yield to other
1181  * kernel threads at the same or higher priority.  This function can be
1182  * called in a tight loop and will typically only yield once per tick.
1183  *
1184  * Most kernel threads run at the same priority in order to allow equal
1185  * sharing.
1186  *
1187  * (self contained on a per cpu basis)
1188  */
1189 void
1190 lwkt_yield(void)
1191 {
1192     globaldata_t gd = mycpu;
1193     thread_t td = gd->gd_curthread;
1194 
1195     if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
1196 	splz();
1197     if (lwkt_resched_wanted()) {
1198 	lwkt_schedule_self(curthread);
1199 	lwkt_switch();
1200     }
1201 }
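
/*
 * Illustrative sketch: a cpu-bound kernel loop may call lwkt_yield() on
 * every iteration; the call is cheap when no reschedule is pending and
 * will typically switch away at most about once per tick:
 *
 *	for (i = 0; i < count; ++i) {
 *		process_one_item(i);		(hypothetical helper)
 *		lwkt_yield();
 *	}
 */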
1202 
1203 /*
1204  * This yield is designed for kernel threads with a user context.
1205  *
1206  * The kernel acting on behalf of the user is potentially cpu-bound;
1207  * this function will efficiently allow other threads to run and also
1208  * switch to other processes by releasing the current process designation.
1209  *
1210  * The lwkt_user_yield() function is designed to have very low overhead
1211  * if no yield is determined to be needed.
1212  */
1213 void
1214 lwkt_user_yield(void)
1215 {
1216     globaldata_t gd = mycpu;
1217     thread_t td = gd->gd_curthread;
1218 
1219     /*
1220      * Always run any pending interrupts in case we are in a critical
1221      * section.
1222      */
1223     if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
1224 	splz();
1225 
1226     /*
1227      * Switch (which forces a release) if another kernel thread needs
1228      * the cpu, if userland wants us to resched, or if our kernel
1229      * quantum has run out.
1230      */
1231     if (lwkt_resched_wanted() ||
1232 	user_resched_wanted())
1233     {
1234 	lwkt_switch();
1235     }
1236 
1237 #if 0
1238     /*
1239      * Reacquire the current process if we are released.
1240      *
1241      * XXX not implemented atm.  The kernel may be holding locks and such,
1242      *     so we want the thread to continue to receive cpu.
1243      */
1244     if (td->td_release == NULL && lp) {
1245 	lp->lwp_proc->p_usched->acquire_curproc(lp);
1246 	td->td_release = lwkt_passive_release;
1247 	lwkt_setpri_self(TDPRI_USER_NORM);
1248     }
1249 #endif
1250 }
1251 
1252 /*
1253  * Generic schedule.  Possibly schedule threads belonging to other cpus and
1254  * deal with threads that might be blocked on a wait queue.
1255  *
1256  * We have a little helper inline function which does additional work after
1257  * the thread has been enqueued, including dealing with preemption and
1258  * setting need_lwkt_resched() (which prevents the kernel from returning
1259  * to userland until it has processed higher priority threads).
1260  *
1261  * It is possible for this routine to be called after a failed _enqueue
1262  * (due to the target thread migrating, sleeping, or otherwise blocked).
1263  * We have to check that the thread is actually on the run queue!
1264  */
1265 static __inline
1266 void
1267 _lwkt_schedule_post(globaldata_t gd, thread_t ntd, int ccount)
1268 {
1269     if (ntd->td_flags & TDF_RUNQ) {
1270 	if (ntd->td_preemptable) {
1271 	    ntd->td_preemptable(ntd, ccount);	/* YYY +token */
1272 	}
1273     }
1274 }
1275 
1276 static __inline
1277 void
1278 _lwkt_schedule(thread_t td)
1279 {
1280     globaldata_t mygd = mycpu;
1281 
1282     KASSERT(td != &td->td_gd->gd_idlethread,
1283 	    ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
1284     KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
1285     crit_enter_gd(mygd);
1286     KKASSERT(td->td_lwp == NULL ||
1287 	     (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1288 
1289     if (td == mygd->gd_curthread) {
1290 	_lwkt_enqueue(td);
1291     } else {
1292 	/*
1293 	 * If we own the thread, there is no race (since we are in a
1294 	 * critical section).  If we do not own the thread there might
1295 	 * be a race but the target cpu will deal with it.
1296 	 */
1297 #ifdef SMP
1298 	if (td->td_gd == mygd) {
1299 	    _lwkt_enqueue(td);
1300 	    _lwkt_schedule_post(mygd, td, 1);
1301 	} else {
1302 	    lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);
1303 	}
1304 #else
1305 	_lwkt_enqueue(td);
1306 	_lwkt_schedule_post(mygd, td, 1);
1307 #endif
1308     }
1309     crit_exit_gd(mygd);
1310 }
1311 
1312 void
1313 lwkt_schedule(thread_t td)
1314 {
1315     _lwkt_schedule(td);
1316 }
1317 
1318 void
1319 lwkt_schedule_noresched(thread_t td)	/* XXX not impl */
1320 {
1321     _lwkt_schedule(td);
1322 }
1323 
1324 #ifdef SMP
1325 
1326 /*
1327  * When scheduled remotely, if frame != NULL the IPIQ is being run via
1328  * doreti or an interrupt, in which case preemption can be allowed.
1329  *
1330  * To allow preemption we have to drop the critical section so only
1331  * one is present in _lwkt_schedule_post.
1332  */
1333 static void
1334 lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
1335 {
1336     thread_t td = curthread;
1337     thread_t ntd = arg;
1338 
1339     if (frame && ntd->td_preemptable) {
1340 	crit_exit_noyield(td);
1341 	_lwkt_schedule(ntd);
1342 	crit_enter_quick(td);
1343     } else {
1344 	_lwkt_schedule(ntd);
1345     }
1346 }
1347 
1348 /*
1349  * Thread migration using a 'Pull' method.  The thread may or may not be
1350  * the current thread.  It MUST be descheduled and in a stable state.
1351  * lwkt_giveaway() must be called on the cpu owning the thread.
1352  *
1353  * At any point after lwkt_giveaway() is called, the target cpu may
1354  * 'pull' the thread by calling lwkt_acquire().
1355  *
1356  * We have to make sure the thread is not sitting on a per-cpu tsleep
1357  * queue or it will blow up when it moves to another cpu.
1358  *
1359  * MPSAFE - must be called under very specific conditions.
1360  */
1361 void
1362 lwkt_giveaway(thread_t td)
1363 {
1364     globaldata_t gd = mycpu;
1365 
1366     crit_enter_gd(gd);
1367     if (td->td_flags & TDF_TSLEEPQ)
1368 	tsleep_remove(td);
1369     KKASSERT(td->td_gd == gd);
1370     TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
1371     td->td_flags |= TDF_MIGRATING;
1372     crit_exit_gd(gd);
1373 }
1374 
1375 void
1376 lwkt_acquire(thread_t td)
1377 {
1378     globaldata_t gd;
1379     globaldata_t mygd;
1380     int retry = 10000000;
1381 
1382     KKASSERT(td->td_flags & TDF_MIGRATING);
1383     gd = td->td_gd;
1384     mygd = mycpu;
1385     if (gd != mycpu) {
1386 	cpu_lfence();
1387 	KKASSERT((td->td_flags & TDF_RUNQ) == 0);
1388 	crit_enter_gd(mygd);
1389 	DEBUG_PUSH_INFO("lwkt_acquire");
1390 	while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
1391 #ifdef SMP
1392 	    lwkt_process_ipiq();
1393 #endif
1394 	    cpu_lfence();
1395 	    if (--retry == 0) {
1396 		kprintf("lwkt_acquire: stuck: td %p td->td_flags %08x\n",
1397 			td, td->td_flags);
1398 		retry = 10000000;
1399 	    }
1400 	}
1401 	DEBUG_POP_INFO();
1402 	cpu_mfence();
1403 	td->td_gd = mygd;
1404 	TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
1405 	td->td_flags &= ~TDF_MIGRATING;
1406 	crit_exit_gd(mygd);
1407     } else {
1408 	crit_enter_gd(mygd);
1409 	TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
1410 	td->td_flags &= ~TDF_MIGRATING;
1411 	crit_exit_gd(mygd);
1412     }
1413 }
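
/*
 * Illustrative sketch of the pull protocol (how the target cpu is notified
 * is up to the caller; names here are hypothetical).  The thread must
 * already be descheduled and in a stable state:
 *
 *	On the cpu currently owning td:
 *		lwkt_giveaway(td);
 *		... notify the target cpu (ipi, message, etc) ...
 *
 *	Later, on the target cpu:
 *		lwkt_acquire(td);
 *		lwkt_schedule(td);
 */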
1414 
1415 #endif
1416 
1417 /*
1418  * Generic deschedule.  Descheduling threads other than your own should be
1419  * done only in carefully controlled circumstances.  Descheduling is
1420  * asynchronous.
1421  *
1422  * This function may block if the cpu has run out of messages.
1423  */
1424 void
1425 lwkt_deschedule(thread_t td)
1426 {
1427     crit_enter();
1428 #ifdef SMP
1429     if (td == curthread) {
1430 	_lwkt_dequeue(td);
1431     } else {
1432 	if (td->td_gd == mycpu) {
1433 	    _lwkt_dequeue(td);
1434 	} else {
1435 	    lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
1436 	}
1437     }
1438 #else
1439     _lwkt_dequeue(td);
1440 #endif
1441     crit_exit();
1442 }
1443 
1444 /*
1445  * Set the target thread's priority.  This routine does not automatically
1446  * switch to a higher priority thread; LWKT threads are not designed for
1447  * continuous priority changes.  Yield if you want to switch.
1448  */
1449 void
1450 lwkt_setpri(thread_t td, int pri)
1451 {
1452     if (td->td_pri != pri) {
1453 	KKASSERT(pri >= 0);
1454 	crit_enter();
1455 	if (td->td_flags & TDF_RUNQ) {
1456 	    KKASSERT(td->td_gd == mycpu);
1457 	    _lwkt_dequeue(td);
1458 	    td->td_pri = pri;
1459 	    _lwkt_enqueue(td);
1460 	} else {
1461 	    td->td_pri = pri;
1462 	}
1463 	crit_exit();
1464     }
1465 }
1466 
1467 /*
1468  * Set the initial priority for a thread prior to it being scheduled for
1469  * the first time.  The thread MUST NOT be scheduled before or during
1470  * this call.  The thread may be assigned to a cpu other than the current
1471  * cpu.
1472  *
1473  * Typically used after a thread has been created with TDF_STOPPREQ,
1474  * and before the thread is initially scheduled.
1475  */
1476 void
1477 lwkt_setpri_initial(thread_t td, int pri)
1478 {
1479     KKASSERT(pri >= 0);
1480     KKASSERT((td->td_flags & TDF_RUNQ) == 0);
1481     td->td_pri = pri;
1482 }
1483 
1484 void
1485 lwkt_setpri_self(int pri)
1486 {
1487     thread_t td = curthread;
1488 
1489     KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
1490     crit_enter();
1491     if (td->td_flags & TDF_RUNQ) {
1492 	_lwkt_dequeue(td);
1493 	td->td_pri = pri;
1494 	_lwkt_enqueue(td);
1495     } else {
1496 	td->td_pri = pri;
1497     }
1498     crit_exit();
1499 }
1500 
1501 /*
1502  * hz tick scheduler clock for LWKT threads
1503  */
1504 void
1505 lwkt_schedulerclock(thread_t td)
1506 {
1507     globaldata_t gd = td->td_gd;
1508     thread_t xtd;
1509 
1510     if (TAILQ_FIRST(&gd->gd_tdrunq) == td) {
1511 	/*
1512 	 * If the current thread is at the head of the runq shift it to the
1513 	 * end of any equal-priority threads and request a LWKT reschedule
1514 	 * if it moved.
1515 	 */
1516 	xtd = TAILQ_NEXT(td, td_threadq);
1517 	if (xtd && xtd->td_pri == td->td_pri) {
1518 	    TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
1519 	    while (xtd && xtd->td_pri == td->td_pri)
1520 		xtd = TAILQ_NEXT(xtd, td_threadq);
1521 	    if (xtd)
1522 		TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
1523 	    else
1524 		TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
1525 	    need_lwkt_resched();
1526 	}
1527     } else {
1528 	/*
1529 	 * If we scheduled a thread other than the one at the head of the
1530 	 * queue always request a reschedule every tick.
1531 	 */
1532 	need_lwkt_resched();
1533     }
1534 }
1535 
1536 /*
1537  * Migrate the current thread to the specified cpu.
1538  *
1539  * This is accomplished by descheduling ourselves from the current cpu
1540  * and setting td_migrate_gd.  The lwkt_switch() code will detect that the
1541  * 'old' thread wants to migrate after it has been completely switched out
1542  * and will complete the migration.
1543  *
1544  * TDF_MIGRATING prevents scheduling races while the thread is being migrated.
1545  *
1546  * We must be sure to release our current process designation (if a user
1547  * process) before clearing out any tsleepq we are on because the release
1548  * code may re-add us.
1549  *
1550  * We must be sure to remove ourselves from the current cpu's tsleepq
1551  * before potentially moving to another queue.  The thread can be on
1552  * a tsleepq due to a left-over tsleep_interlock().
1553  */
1554 
1555 void
1556 lwkt_setcpu_self(globaldata_t rgd)
1557 {
1558 #ifdef SMP
1559     thread_t td = curthread;
1560 
1561     if (td->td_gd != rgd) {
1562 	crit_enter_quick(td);
1563 
1564 	if (td->td_release)
1565 	    td->td_release(td);
1566 	if (td->td_flags & TDF_TSLEEPQ)
1567 	    tsleep_remove(td);
1568 
1569 	/*
1570 	 * Set TDF_MIGRATING to prevent a spurious reschedule while we are
1571 	 * trying to deschedule ourselves and switch away, then deschedule
1572 	 * ourself, remove us from tdallq, and set td_migrate_gd.  Finally,
1573 	 * call lwkt_switch() to complete the operation.
1574 	 */
1575 	td->td_flags |= TDF_MIGRATING;
1576 	lwkt_deschedule_self(td);
1577 	TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
1578 	td->td_migrate_gd = rgd;
1579 	lwkt_switch();
1580 
1581 	/*
1582 	 * We are now on the target cpu
1583 	 */
1584 	KKASSERT(rgd == mycpu);
1585 	TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
1586 	crit_exit_quick(td);
1587     }
1588 #endif
1589 }
1590 
1591 void
1592 lwkt_migratecpu(int cpuid)
1593 {
1594 #ifdef SMP
1595 	globaldata_t rgd;
1596 
1597 	rgd = globaldata_find(cpuid);
1598 	lwkt_setcpu_self(rgd);
1599 #endif
1600 }
1601 
1602 #ifdef SMP
1603 /*
1604  * Remote IPI for cpu migration (called while in a critical section so we
1605  * do not have to enter another one).
1606  *
1607  * The thread (td) has already been completely descheduled from the
1608  * originating cpu and we can simply assert the case.  The thread is
1609  * assigned to the new cpu and enqueued.
1610  *
1611  * The thread will re-add itself to tdallq when it resumes execution.
1612  */
1613 static void
1614 lwkt_setcpu_remote(void *arg)
1615 {
1616     thread_t td = arg;
1617     globaldata_t gd = mycpu;
1618 
1619     KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
1620     td->td_gd = gd;
1621     cpu_mfence();
1622     td->td_flags &= ~TDF_MIGRATING;
1623     KKASSERT(td->td_migrate_gd == NULL);
1624     KKASSERT(td->td_lwp == NULL ||
1625 	    (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
1626     _lwkt_enqueue(td);
1627 }
1628 #endif
1629 
1630 struct lwp *
1631 lwkt_preempted_proc(void)
1632 {
1633     thread_t td = curthread;
1634     while (td->td_preempted)
1635 	td = td->td_preempted;
1636     return(td->td_lwp);
1637 }
1638 
1639 /*
1640  * Create a kernel process/thread/whatever.  It shares its address space
1641  * with proc0 - ie: kernel only.
1642  *
1643  * If the cpu is not specified one will be selected.  In the future
1644  * specifying a cpu of -1 will enable kernel thread migration between
1645  * cpus.
1646  */
1647 int
1648 lwkt_create(void (*func)(void *), void *arg, struct thread **tdp,
1649 	    thread_t template, int tdflags, int cpu, const char *fmt, ...)
1650 {
1651     thread_t td;
1652     __va_list ap;
1653 
1654     td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
1655 			   tdflags);
1656     if (tdp)
1657 	*tdp = td;
1658     cpu_set_thread_handler(td, lwkt_exit, func, arg);
1659 
1660     /*
1661      * Set up arg0 for 'ps' etc
1662      */
1663     __va_start(ap, fmt);
1664     kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
1665     __va_end(ap);
1666 
1667     /*
1668      * Schedule the thread to run
1669      */
1670     if (td->td_flags & TDF_NOSTART)
1671 	td->td_flags &= ~TDF_NOSTART;
1672     else
1673 	lwkt_schedule(td);
1674     return 0;
1675 }
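
/*
 * Illustrative sketch (hypothetical names): create a simple kernel thread
 * on a round-robin selected cpu.  Returning from the thread function falls
 * into lwkt_exit(), as arranged by the cpu_set_thread_handler() call above:
 *
 *	static void
 *	my_kthread(void *dummy)
 *	{
 *		kprintf("hello from %s\n", curthread->td_comm);
 *		... do the thread's work, then simply return ...
 *	}
 *
 *	lwkt_create(my_kthread, NULL, NULL, NULL, 0, -1, "mykthread");
 */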
1676 
1677 /*
1678  * Destroy an LWKT thread.   Warning!  This function is not called when
1679  * a process exits; cpu_proc_exit() directly calls cpu_thread_exit() and
1680  * uses a different reaping mechanism.
1681  */
1682 void
1683 lwkt_exit(void)
1684 {
1685     thread_t td = curthread;
1686     thread_t std;
1687     globaldata_t gd;
1688 
1689     /*
1690      * Do any cleanup that might block here
1691      */
1692     if (td->td_flags & TDF_VERBOSE)
1693 	kprintf("kthread %p %s has exited\n", td, td->td_comm);
1694     caps_exit(td);
1695     biosched_done(td);
1696     dsched_exit_thread(td);
1697 
1698     /*
1699      * Get us into a critical section to interlock gd_freetd and loop
1700      * until we can get it freed.
1701      *
1702      * We have to cache the current td in gd_freetd because objcache_put()ing
1703      * it would rip it out from under us while our thread is still active.
1704      *
1705      * We are the current thread so of course our own TDF_RUNNING bit will
1706      * be set, so unlike the lwp reap code we don't wait for it to clear.
1707      */
1708     gd = mycpu;
1709     crit_enter_quick(td);
1710     for (;;) {
1711 	if (td->td_refs) {
1712 	    tsleep(td, 0, "tdreap", 1);
1713 	    continue;
1714 	}
1715 	if ((std = gd->gd_freetd) != NULL) {
1716 	    KKASSERT((std->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
1717 	    gd->gd_freetd = NULL;
1718 	    objcache_put(thread_cache, std);
1719 	    continue;
1720 	}
1721 	break;
1722     }
1723 
1724     /*
1725      * Remove thread resources from kernel lists and deschedule us for
1726      * the last time.  We cannot block after this point or we may end
1727      * up with a stale td on the tsleepq.
1728      *
1729      * None of this may block, the critical section is the only thing
1730      * protecting tdallq and the only thing preventing new lwkt_hold()
1731      * thread refs now.
1732      */
1733     if (td->td_flags & TDF_TSLEEPQ)
1734 	tsleep_remove(td);
1735     lwkt_deschedule_self(td);
1736     lwkt_remove_tdallq(td);
1737     KKASSERT(td->td_refs == 0);
1738 
1739     /*
1740      * Final cleanup
1741      */
1742     KKASSERT(gd->gd_freetd == NULL);
1743     if (td->td_flags & TDF_ALLOCATED_THREAD)
1744 	gd->gd_freetd = td;
1745     cpu_thread_exit();
1746 }
1747 
1748 void
1749 lwkt_remove_tdallq(thread_t td)
1750 {
1751     KKASSERT(td->td_gd == mycpu);
1752     TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
1753 }
1754 
1755 /*
1756  * Code reduction and branch prediction improvements.  Call/return
1757  * overhead on modern cpus often degenerates into 0 cycles due to
1758  * the cpu's branch prediction hardware and return pc cache.  We
1759  * can take advantage of this by not inlining medium-complexity
1760  * functions and we can also reduce the branch prediction impact
1761  * by collapsing perfectly predictable branches into a single
1762  * procedure instead of duplicating it.
1763  *
1764  * Is any of this noticeable?  Probably not, so I'll take the
1765  * smaller code size.
1766  */
1767 void
1768 crit_exit_wrapper(__DEBUG_CRIT_ARG__)
1769 {
1770     _crit_exit(mycpu __DEBUG_CRIT_PASS_ARG__);
1771 }
1772 
1773 void
1774 crit_panic(void)
1775 {
1776     thread_t td = curthread;
1777     int lcrit = td->td_critcount;
1778 
1779     td->td_critcount = 0;
1780     panic("td_critcount is/would-go negative! %p %d", td, lcrit);
1781     /* NOT REACHED */
1782 }
1783 
1784 #ifdef SMP
1785 
1786 /*
1787  * Called from debugger/panic on cpus which have been stopped.  We must still
1788  * process the IPIQ while stopped, even if we were stopped while in a critical
1789  * section (XXX).
1790  *
1791  * If we are dumping also try to process any pending interrupts.  This may
1792  * or may not work depending on the state of the cpu at the point it was
1793  * stopped.
1794  */
1795 void
1796 lwkt_smp_stopped(void)
1797 {
1798     globaldata_t gd = mycpu;
1799 
1800     crit_enter_gd(gd);
1801     if (dumping) {
1802 	lwkt_process_ipiq();
1803 	splz();
1804     } else {
1805 	lwkt_process_ipiq();
1806     }
1807     crit_exit_gd(gd);
1808 }
1809 
1810 #endif
1811