xref: /dragonfly/sys/sys/thread2.h (revision 3d33658b)
/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking, these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/cpufunc.h>
#include <machine/cpumask.h>

/*
 * Don't let GCC reorder critical section count adjustments, because it
 * will BLOW US UP if it does.
 */
static __inline void
crit_enter_raw(thread_t td)
{
	cpu_ccfence();
	++td->td_critcount;
	cpu_ccfence();
}

static __inline void
crit_exit_raw(thread_t td)
{
	cpu_ccfence();
	--td->td_critcount;
	cpu_ccfence();
}

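/*
 * Illustrative sketch (hypothetical per-cpu store shown): without the
 * cpu_ccfence() compiler barriers the optimizer would be free to move
 * memory accesses across the count adjustment.  In a sequence such as
 *
 *	crit_enter_raw(td);
 *	gd->gd_example_field = v;	(hypothetical protected store)
 *	crit_exit_raw(td);
 *
 * the store could otherwise migrate outside the protected region.
 */
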
/*
 * Is a token held either by the specified thread or held shared?
 *
 * We can't inexpensively validate the thread for a shared token
 * without iterating td->td_toks, so this isn't a perfect test.
 */
static __inline int
_lwkt_token_held_any(lwkt_token_t tok, thread_t td)
{
	long count = tok->t_count;

	cpu_ccfence();
	if (tok->t_ref >= &td->td_toks_base && tok->t_ref < td->td_toks_stop)
		return TRUE;
	if ((count & TOK_EXCLUSIVE) == 0 &&
	    (count & ~(TOK_EXCLUSIVE|TOK_EXCLREQ))) {
		return TRUE;
	}
	return FALSE;
}

/*
 * Is a token held exclusively by the specified thread?
 */
static __inline int
_lwkt_token_held_excl(lwkt_token_t tok, thread_t td)
{
	return ((tok->t_ref >= &td->td_toks_base &&
		 tok->t_ref < td->td_toks_stop));
}

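/*
 * Illustrative sketch (hypothetical token field shown): a caller might
 * assert ownership with something like
 *
 *	KKASSERT(_lwkt_token_held_excl(&obj->obj_token, curthread));
 */
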
/*
 * Critical section debugging
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(mycpu, __func__)
#define crit_enter_id(id)		_crit_enter(mycpu, id)
#define crit_enter_gd(curgd)		_crit_enter((curgd), __func__)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __func__)
#define crit_enter_hard()		_crit_enter_hard(mycpu, __func__)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd), __func__)
#define crit_exit()			_crit_exit(mycpu, __func__)
#define crit_exit_id(id)		_crit_exit(mycpu, id)
#define crit_exit_gd(curgd)		_crit_exit((curgd), __func__)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __func__)
#define crit_exit_hard()		_crit_exit_hard(mycpu, __func__)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd), __func__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd),__func__)
#else
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter(mycpu)
#define crit_enter_id(id)		_crit_enter(mycpu)
#define crit_enter_gd(curgd)		_crit_enter((curgd))
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd))
#define crit_enter_hard()		_crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd))
#define crit_exit()			crit_exit_wrapper()
#define crit_exit_id(id)		_crit_exit(mycpu)
#define crit_exit_gd(curgd)		_crit_exit((curgd))
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd))
#define crit_exit_hard()		_crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd))
#endif

extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);

/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
    int wi = td->td_crit_debug_index;

    td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
    ++td->td_crit_debug_index;
}

static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
    const char *gid;
    int wi;

    wi = td->td_crit_debug_index - 1;
    if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
	if (td->td_in_crit_report == 0) {
	    td->td_in_crit_report = 1;
	    kprintf("crit_exit(%s) expected id %s\n", id, gid);
	    td->td_in_crit_report = 0;
	}
    }
    --td->td_crit_debug_index;
}

#endif

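/*
 * Illustrative sketch: with DEBUG_CRIT_SECTIONS enabled the enter/exit
 * ids recorded above are expected to pair up, e.g.
 *
 *	crit_enter_id("example");
 *	...
 *	crit_exit_id("example");
 *
 * A mismatched exit id is reported via kprintf() by _debug_crit_exit().
 */
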
/*
 * Critical sections prevent preemption, but allow explicit blocking
 * and thread switching.  Any interrupt occurring while in a critical
 * section is made pending and returns immediately.  Interrupts are not
 * physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquiring a token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * Passed gd or td arguments are simply optimizations when mycpu or
 * curthread is already available to the caller.
 */

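/*
 * Illustrative sketch of typical usage (not part of this header):
 *
 *	crit_enter();
 *	...non-preemptible work, may still block or switch threads...
 *	crit_exit();
 *
 *	crit_enter_hard();
 *	...must not block, spinlocks are still permitted...
 *	crit_exit_hard();
 */
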
/*
 * crit_enter
 */
static __inline void
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    crit_enter_raw(td);
    __DEBUG_CRIT_ENTER(td);
}

static __inline void
_crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
    ++gd->gd_intr_nesting_level;
    cpu_ccfence();
}


/*
 * crit_exit*()
 *
 * NOTE: Conditionalizing only on gd_reqflags, a case which is virtually
 *	 never true regardless of crit_count, should result in 100%
 *	 optimal code execution.  We don't check crit_count because
 *	 doing so just bloats the inline and does not improve performance.
 *
 * NOTE: This can produce a considerable amount of code despite the
 *	 relatively few source lines, so the non-debug case typically
 *	 just wraps it in a real function, crit_exit_wrapper().
 */
static __inline void
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    __DEBUG_CRIT_EXIT(td);
    crit_exit_raw(td);
#ifdef INVARIANTS
    if (__predict_false(td->td_critcount < 0))
	crit_panic();
#endif
}

static __inline void
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
    if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
	lwkt_maybe_splz(td);
}

static __inline void
_crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
    cpu_ccfence();
    --gd->gd_intr_nesting_level;
    _crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline int
crit_test(thread_t td)
{
    return(td->td_critcount);
}

/*
 * Return whether any threads are runnable.
 */
static __inline int
lwkt_runnable(void)
{
    return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
}

static __inline int
lwkt_getpri(thread_t td)
{
    return(td->td_pri);
}

static __inline int
lwkt_getpri_self(void)
{
    return(lwkt_getpri(curthread));
}

/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
static __inline void
lwkt_passive_recover(thread_t td)
{
#ifndef NO_LWKT_SPLIT_USERPRI
    if (td->td_release == NULL)
	lwkt_setpri_self(TDPRI_USER_NORM);
    td->td_release = NULL;
#endif
}

/*
 * cpusync support
 */
static __inline void
lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
		  cpusync_func_t func, void *data)
{
	cs->cs_mask = mask;
	/* cs->cs_mack = 0; handled by _interlock */
	cs->cs_func = func;
	cs->cs_data = data;
}

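/*
 * Illustrative sketch (hypothetical callback and argument shown): a
 * caller typically initializes the structure and then brackets the
 * operation with the interlock/deinterlock calls:
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, smp_active_mask, my_sync_func, my_arg);
 *	lwkt_cpusync_interlock(&cs);
 *	lwkt_cpusync_deinterlock(&cs);
 */
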
/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three arguments:
 * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
 * the trap frame is not known).  However, we wish to provide simpler
 * interfaces for common callbacks: the basic IPI messaging function as
 * used by the kernel takes a single argument.
 */
static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(cpumask_t mask, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(cpumask_t mask, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
		       void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}

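/*
 * Illustrative sketch (hypothetical handler shown): a single-argument
 * callback can be dispatched to another cpu with
 *
 *	static void my_ipi_handler(void *arg);
 *
 *	lwkt_send_ipiq(globaldata_find(cpuid), my_ipi_handler, arg);
 */
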
static __inline int
lwkt_need_ipiq_process(globaldata_t gd)
{
    lwkt_ipiq_t ipiq;

    if (CPUMASK_TESTNZERO(gd->gd_ipimask))
	return 1;

    ipiq = &gd->gd_cpusyncq;
    return (ipiq->ip_rindex != ipiq->ip_windex);
}

#endif	/* _SYS_THREAD2_H_ */