/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking, these routines only operate on threads associated
 * with the current cpu.  For example, a higher-priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 *
 * $DragonFly: src/sys/sys/thread2.h,v 1.25 2005/11/08 20:47:02 dillon Exp $
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#ifndef _MACHINE_CPUFUNC_H_
#include <machine/cpufunc.h>
#endif
#endif

/*
 * Critical section debugging
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(__FUNCTION__)
#define crit_enter_id(id)		_crit_enter(id)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __FUNCTION__)
#define crit_enter_gd(curgd)		_crit_enter_gd((curgd), __FUNCTION__)
#define crit_exit()			_crit_exit(__FUNCTION__)
#define crit_exit_id(id)		_crit_exit(id)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __FUNCTION__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd), __FUNCTION__)
#define crit_exit_gd(curgd)		_crit_exit_gd((curgd), __FUNCTION__)
#else
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter()
#define crit_enter_id(id)		_crit_enter()
#define crit_enter_quick(curtd)		_crit_enter_quick(curtd)
#define crit_enter_gd(curgd)		_crit_enter_gd(curgd)
#define crit_exit()			_crit_exit()
#define crit_exit_id(id)		_crit_exit()
#define crit_exit_quick(curtd)		_crit_exit_quick(curtd)
#define crit_exit_noyield(curtd)	_crit_exit_noyield(curtd)
#define crit_exit_gd(curgd)		_crit_exit_gd(curgd)
#endif

/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

#include <sys/systm.h>

static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
    int wi = td->td_crit_debug_index;

    td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
    ++td->td_crit_debug_index;
}

static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
    const char *gid;
    int wi;

    wi = td->td_crit_debug_index - 1;
    if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
	if (td->td_in_crit_report == 0) {
	    td->td_in_crit_report = 1;
	    printf("crit_exit(%s) expected id %s\n", id, gid);
	    td->td_in_crit_report = 0;
	}
    }
    --td->td_crit_debug_index;
}

#endif
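
/*
 * Hypothetical illustration (not part of the original header): with
 * DEBUG_CRIT_SECTIONS enabled, a mismatched pair such as
 *
 *	crit_enter_id("dev_foo");
 *	...
 *	crit_exit_id("dev_bar");
 *
 * pushes "dev_foo" onto td_crit_debug_array on entry but pops a
 * different id on exit, so the exit path prints
 * "crit_exit(dev_bar) expected id dev_foo".  Note that ids are
 * compared by pointer, not by string contents.
 */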

/*
 * Critical sections prevent preemption by raising a thread's priority
 * above the highest possible interrupting priority.  Additionally, the
 * current cpu will not be able to schedule a new thread but will instead
 * place it on a pending list (with interrupts physically disabled) and
 * set mycpu->gd_reqflags to indicate that work needs to be done, which
 * lwkt_yield_quick() takes care of.
 *
 * Some of these routines take a struct thread pointer as an argument.  This
 * pointer MUST be curthread and is only passed as an optimization.
 *
 * Synchronous switching and blocking are allowed while in a critical section.
 */
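
/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	crit_enter();
 *	... manipulate per-cpu data without fear of preemption ...
 *	crit_exit();
 *
 * Sections nest: each enter adds TDPRI_CRIT to td_pri and each exit
 * subtracts it, so work pending in gd_reqflags is only serviced once
 * the outermost crit_exit() drops td_pri back below TDPRI_CRIT.
 */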

static __inline void
_crit_enter(__DEBUG_CRIT_ARG__)
{
    struct thread *td = curthread;

#ifdef INVARIANTS
    if (td->td_pri < 0)
	crit_panic();
#endif
    td->td_pri += TDPRI_CRIT;
    __DEBUG_CRIT_ENTER(td);
    cpu_ccfence();
}

static __inline void
_crit_enter_quick(struct thread *curtd __DEBUG_CRIT_ADD_ARG__)
{
    curtd->td_pri += TDPRI_CRIT;
    __DEBUG_CRIT_ENTER(curtd);
    cpu_ccfence();
}

static __inline void
_crit_enter_gd(globaldata_t mygd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(mygd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_exit_noyield(struct thread *curtd __DEBUG_CRIT_ADD_ARG__)
{
    __DEBUG_CRIT_EXIT(curtd);
    curtd->td_pri -= TDPRI_CRIT;
#ifdef INVARIANTS
    if (curtd->td_pri < 0)
	crit_panic();
#endif
    cpu_ccfence();	/* prevent compiler reordering */
}

static __inline void
_crit_exit(__DEBUG_CRIT_ARG__)
{
    thread_t td = curthread;

    __DEBUG_CRIT_EXIT(td);
    td->td_pri -= TDPRI_CRIT;
#ifdef INVARIANTS
    if (td->td_pri < 0)
	crit_panic();
#endif
    cpu_ccfence();	/* prevent compiler reordering */
    if (td->td_gd->gd_reqflags && td->td_pri < TDPRI_CRIT)
	lwkt_yield_quick();
}

static __inline void
_crit_exit_quick(struct thread *curtd __DEBUG_CRIT_ADD_ARG__)
{
    globaldata_t gd = curtd->td_gd;

    __DEBUG_CRIT_EXIT(curtd);
    curtd->td_pri -= TDPRI_CRIT;
    cpu_ccfence();	/* prevent compiler reordering */
    if (gd->gd_reqflags && curtd->td_pri < TDPRI_CRIT)
	lwkt_yield_quick();
}

static __inline void
_crit_exit_gd(globaldata_t mygd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_quick(mygd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline int
crit_test(thread_t td)
{
    return(td->td_pri >= TDPRI_CRIT);
}

/*
 * Initialize a tokref_t.  We only need to initialize the token pointer,
 * the flags, and the magic number.  We do not have to initialize tr_next,
 * tr_gdreqnext, or tr_reqgd.
 */
static __inline void
lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok)
{
    ref->tr_magic = LWKT_TOKREF_MAGIC1;
    ref->tr_tok = tok;
    ref->tr_flags = 0;
}
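
/*
 * Usage sketch (hypothetical; the token acquisition and release
 * interfaces themselves are declared in sys/thread.h):
 *
 *	struct lwkt_tokref ref;
 *
 *	lwkt_tokref_init(&ref, &some_token);
 *	... hand &ref to the lwkt token acquisition/release routines ...
 *
 * The magic number lets the token code sanity-check that a reference
 * was initialized before use; the remaining fields are filled in by
 * the token code itself, which is why they need no initialization here.
 */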

/*
 * Return whether any threads are runnable, whether they meet mp_lock
 * requirements or not.
 */
static __inline int
lwkt_runnable(void)
{
    return (mycpu->gd_runqmask != 0);
}

static __inline int
lwkt_getpri(thread_t td)
{
    return(td->td_pri & TDPRI_MASK);
}

static __inline int
lwkt_getpri_self(void)
{
    return(lwkt_getpri(curthread));
}

#ifdef SMP

/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three arguments:
 * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
 * the trap frame is not known).  However, we wish to provide opaque
 * interfaces for simpler callbacks: the basic IPI messaging function as
 * used by the kernel takes a single argument, so the wrappers below cast
 * the one- and two-argument forms to the three-argument form.
 */
static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}
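
/*
 * Hypothetical example (illustration only): a one-argument callback
 * matching ipifunc1_t, dispatched to another cpu.  The wrapper above
 * casts it to the three-argument form and passes 0 for the integer
 * argument; the extra arguments are simply ignored by the callback.
 *
 *	static void
 *	remote_wakeup(void *ident)
 *	{
 *		wakeup(ident);
 *	}
 *
 *	...
 *	lwkt_send_ipiq(othergd, remote_wakeup, ident);
 */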

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(u_int32_t mask, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(u_int32_t mask, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_nowait(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_nowait(globaldata_t target, ipifunc2_t func,
		       void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
			void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}

#endif

#endif