/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/cpufunc.h>
#include <machine/cpumask.h>

/*
 * Don't let GCC reorder critical section count adjustments, because it
 * will BLOW US UP if it does.
 */
static __inline void
crit_enter_raw(thread_t td)
{
	cpu_ccfence();
	++td->td_critcount;
	cpu_ccfence();
}

static __inline void
crit_exit_raw(thread_t td)
{
	cpu_ccfence();
	--td->td_critcount;
	cpu_ccfence();
}

/*
 * Is a token held either by the specified thread or held shared?
 *
 * We can't inexpensively validate the thread for a shared token
 * without iterating td->td_toks, so this isn't a perfect test.
 */
static __inline int
_lwkt_token_held_any(lwkt_token_t tok, thread_t td)
{
	long count = tok->t_count;

	cpu_ccfence();
	if (tok->t_ref >= &td->td_toks_base && tok->t_ref < td->td_toks_stop)
		return TRUE;
	if ((count & TOK_EXCLUSIVE) == 0 &&
	    (count & ~(TOK_EXCLUSIVE|TOK_EXCLREQ))) {
		return TRUE;
	}
	return FALSE;
}

/*
 * Is a token held exclusively by the specified thread?
 */
static __inline int
_lwkt_token_held_excl(lwkt_token_t tok, thread_t td)
{
	return ((tok->t_ref >= &td->td_toks_base &&
		 tok->t_ref < td->td_toks_stop));
}
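
/*
 * Illustrative usage sketch (not part of the original header): these helpers
 * are typically wrapped in assertions verifying that the caller holds a
 * token before touching the state it protects.  example_token is a
 * hypothetical token used only for illustration.
 *
 *	KKASSERT(_lwkt_token_held_any(&example_token, curthread));
 *	KKASSERT(_lwkt_token_held_excl(&example_token, curthread));
 */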

/*
 * Critical section debugging
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(mycpu, __func__)
#define crit_enter_id(id)		_crit_enter(mycpu, id)
#define crit_enter_gd(curgd)		_crit_enter((curgd), __func__)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __func__)
#define crit_enter_hard()		_crit_enter_hard(mycpu, __func__)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd), __func__)
#define crit_exit()			_crit_exit(mycpu, __func__)
#define crit_exit_id(id)		_crit_exit(mycpu, id)
#define crit_exit_gd(curgd)		_crit_exit((curgd), __func__)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __func__)
#define crit_exit_hard()		_crit_exit_hard(mycpu, __func__)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd), __func__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd),__func__)
#else
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter(mycpu)
#define crit_enter_id(id)		_crit_enter(mycpu)
#define crit_enter_gd(curgd)		_crit_enter((curgd))
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd))
#define crit_enter_hard()		_crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd))
#define crit_exit()			crit_exit_wrapper()
#define crit_exit_id(id)		_crit_exit(mycpu)
#define crit_exit_gd(curgd)		_crit_exit((curgd))
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd))
#define crit_exit_hard()		_crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd))
#endif

extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);

/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
    int wi = td->td_crit_debug_index;

    td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
    ++td->td_crit_debug_index;
}

static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
    const char *gid;
    int wi;

    wi = td->td_crit_debug_index - 1;
    if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
	if (td->td_in_crit_report == 0) {
	    td->td_in_crit_report = 1;
	    kprintf("crit_exit(%s) expected id %s\n", id, gid);
	    td->td_in_crit_report = 0;
	}
    }
    --td->td_crit_debug_index;
}

#endif

/*
 * Critical sections prevent preemption, but still allow explicit blocking
 * and thread switching.  Any interrupt occurring while in a critical
 * section is made pending and returns immediately.  Interrupts are not
 * physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquiring a token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * Passed gd or td arguments are simply optimizations when mycpu or
 * curthread is already available to the caller.
 */

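/*
 * Illustrative usage sketch (not part of the original header): a typical
 * enter/exit pairing protecting per-cpu state on this cpu.  Preemption is
 * prevented for the duration; interrupts are deferred rather than
 * physically masked.
 *
 *	crit_enter();
 *	... touch data private to this cpu ...
 *	crit_exit();
 *
 * The hard variants additionally bump gd_intr_nesting_level, so any
 * blockable operation attempted inside the section will assert:
 *
 *	crit_enter_hard();
 *	... non-blocking work only (spinlocks are still allowed) ...
 *	crit_exit_hard();
 */
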
/*
 * crit_enter*()
 */
static __inline void
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    crit_enter_raw(td);
    __DEBUG_CRIT_ENTER(td);
}

static __inline void
_crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
    ++gd->gd_intr_nesting_level;
    cpu_ccfence();
}


/*
 * crit_exit*()
 *
 * NOTE: Conditionalizing just on gd_reqflags, a case which is virtually
 *	 never true regardless of crit_count, should result in 100%
 *	 optimal code execution.  We don't check crit_count because
 *	 it just bloats the inline and does not improve performance.
 *
 * NOTE: This can produce a considerable amount of code despite the
 *	 relatively few lines of source, so the non-debug case typically
 *	 just wraps it in a real function, crit_exit_wrapper().
 */
static __inline void
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    __DEBUG_CRIT_EXIT(td);
    crit_exit_raw(td);
#ifdef INVARIANTS
    if (__predict_false(td->td_critcount < 0))
	crit_panic();
#endif
}

static __inline void
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
    if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
	lwkt_maybe_splz(td);
}

static __inline void
_crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    cpu_ccfence();
    --gd->gd_intr_nesting_level;
    _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline int
crit_test(thread_t td)
{
    return(td->td_critcount);
}

/*
 * Return whether any threads are runnable.
 */
static __inline int
lwkt_runnable(void)
{
    return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
}

static __inline int
lwkt_getpri(thread_t td)
{
    return(td->td_pri);
}

static __inline int
lwkt_getpri_self(void)
{
    return(lwkt_getpri(curthread));
}

/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
static __inline void
lwkt_passive_recover(thread_t td)
{
#ifndef NO_LWKT_SPLIT_USERPRI
    if (td->td_release == NULL)
	lwkt_setpri_self(TDPRI_USER_NORM);
    td->td_release = NULL;
#endif
}

/*
 * cpusync support
 */
static __inline void
lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
		  cpusync_func_t func, void *data)
{
	cs->cs_mask = mask;
	/* cs->cs_mack = 0; handled by _interlock */
	cs->cs_func = func;
	cs->cs_data = data;
}
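
/*
 * Illustrative usage sketch (not part of the original header), assuming the
 * lwkt_cpusync_interlock()/lwkt_cpusync_deinterlock() interface declared in
 * sys/thread.h: run a callback on every cpu in a mask and wait for it to
 * complete.  example_func and example_arg are hypothetical.
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, mask, example_func, example_arg);
 *	lwkt_cpusync_interlock(&cs);
 *	lwkt_cpusync_deinterlock(&cs);
 */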

/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three arguments:
 * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
 * the trap frame is not known).  However, we wish to provide opaque
 * interfaces for simpler callbacks... the basic IPI messaging function as
 * used by the kernel takes a single argument.
 */
static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}
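
/*
 * Illustrative usage sketch (not part of the original header): a callback
 * written for the single-argument form, dispatched to another cpu's
 * globaldata.  example_ipi_func, othergd and arg are hypothetical.
 *
 *	static void
 *	example_ipi_func(void *arg)
 *	{
 *		... runs on the target cpu ...
 *	}
 *
 *	lwkt_send_ipiq(othergd, example_ipi_func, arg);
 */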

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(cpumask_t mask, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(cpumask_t mask, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
		       void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_need_ipiq_process(globaldata_t gd)
{
    lwkt_ipiq_t ipiq;

    if (CPUMASK_TESTNZERO(gd->gd_ipimask))
	return 1;

    ipiq = &gd->gd_cpusyncq;
    return (ipiq->ip_rindex != ipiq->ip_windex);
}

#endif	/* _KERNEL */
#endif	/* _SYS_THREAD2_H_ */