/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 *
 * $DragonFly: src/sys/sys/thread2.h,v 1.28 2006/12/23 00:27:03 swildner Exp $
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#ifndef _MACHINE_CPUFUNC_H_
#include <machine/cpufunc.h>
#endif

/*
 * Is a token held by the specified thread?
 *
 * A token is held iff its t_ref points into td's token reference array,
 * i.e. somewhere in [&td->td_toks_base, td->td_toks_stop).
 */
static __inline int
_lwkt_token_held(lwkt_token_t tok, thread_t td)
{
	return (tok->t_ref >= &td->td_toks_base &&
		tok->t_ref < td->td_toks_stop);
}

/*
 * Critical section debugging.
 *
 * When DEBUG_CRIT_SECTIONS is defined every crit_enter()/crit_exit()
 * variant carries an identifying string (normally __FUNCTION__) through
 * the __DEBUG_CRIT_* argument-splicing macros so mismatched pairs can be
 * reported.  In the non-debug build the id plumbing compiles away and
 * crit_exit() is routed through the out-of-line crit_exit_wrapper() to
 * keep inline bloat down (see the NOTE above _crit_exit_noyield()).
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(mycpu, __FUNCTION__)
#define crit_enter_id(id)		_crit_enter(mycpu, id)
#define crit_enter_gd(curgd)		_crit_enter((curgd), __FUNCTION__)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __FUNCTION__)
#define crit_enter_hard()		_crit_enter_hard(mycpu, __FUNCTION__)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd), __FUNCTION__)
#define crit_exit()			_crit_exit(mycpu, __FUNCTION__)
#define crit_exit_id(id)		_crit_exit(mycpu, id)
#define crit_exit_gd(curgd)		_crit_exit((curgd), __FUNCTION__)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __FUNCTION__)
#define crit_exit_hard()		_crit_exit_hard(mycpu, __FUNCTION__)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd), __FUNCTION__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd),__FUNCTION__)
#else
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter(mycpu)
#define crit_enter_id(id)		_crit_enter(mycpu)
#define crit_enter_gd(curgd)		_crit_enter((curgd))
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd))
#define crit_enter_hard()		_crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd))
#define crit_exit()			crit_exit_wrapper()
#define crit_exit_id(id)		_crit_exit(mycpu)
#define crit_exit_gd(curgd)		_crit_exit((curgd))
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd))
#define crit_exit_hard()		_crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd))
#endif

extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);

/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

/*
 * Push the caller's id onto the thread's circular debug array.  The
 * index is masked with CRIT_DEBUG_ARRAY_MASK only on access, so the
 * raw index keeps counting and old entries are silently overwritten.
 */
static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
	int wi = td->td_crit_debug_index;

	td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
	++td->td_crit_debug_index;
}

/*
 * Pop the most recent id and warn if it does not match the id passed
 * to the exit.  td_in_crit_report guards against recursive reports
 * (kprintf itself may enter/exit critical sections).  Note the match
 * is by pointer, not strcmp -- identical string data at different
 * addresses would be reported as a mismatch.
 */
static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
	const char *gid;
	int wi;

	wi = td->td_crit_debug_index - 1;
	if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
		if (td->td_in_crit_report == 0) {
			td->td_in_crit_report = 1;
			kprintf("crit_exit(%s) expected id %s\n", id, gid);
			td->td_in_crit_report = 0;
		}
	}
	--td->td_crit_debug_index;
}

#endif

/*
 * Critical sections prevent preemption, while allowing explicit blocking
 * and thread switching.  Any interrupt occurring while in a critical
 * section is made pending and returns immediately.  Interrupts are not
 * physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquire token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * Passed gd or td arguments are simply optimizations when mycpu or
 * curthread is already available to the caller.
 */

/*
 * crit_enter
 */
static __inline void
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	++td->td_critcount;
	__DEBUG_CRIT_ENTER(td);
	cpu_ccfence();		/* section body may not be reordered above the count bump */
}

static __inline void
_crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

/*
 * Hard variant: also bump gd_intr_nesting_level, after entering the
 * normal critical section (the mirror image of _crit_exit_hard()).
 */
static __inline void
_crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
	++gd->gd_intr_nesting_level;
}


/*
 * crit_exit*()
 *
 * NOTE: Conditionalizing just gd_reqflags, a case which is virtually
 *	 never true regardless of crit_count, should result in 100%
 *	 optimal code execution.  We don't check crit_count because
 *	 it just bloats the inline and does not improve performance.
 *
 * NOTE: This can produce a considerable amount of code despite the
 *	 relatively few lines of code so the non-debug case typically
 *	 just wraps it in a real function, crit_exit_wrapper().
 */
static __inline void
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	__DEBUG_CRIT_EXIT(td);
	--td->td_critcount;
#ifdef INVARIANTS
	/* more exits than enters: the count went negative */
	if (__predict_false(td->td_critcount < 0))
		crit_panic();
#endif
	cpu_ccfence();	/* prevent compiler reordering */
}

/*
 * Exit and, if the cpu has pending work flagged in gd_reqflags,
 * give lwkt_maybe_splz() a chance to process it.
 */
static __inline void
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
	if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
		lwkt_maybe_splz(td);
}

static __inline void
_crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

/*
 * Hard variant: drop gd_intr_nesting_level before leaving the normal
 * critical section, so any splz work triggered by the exit runs
 * outside the hard section.
 */
static __inline void
_crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	--gd->gd_intr_nesting_level;
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

/*
 * Return td's critical section nesting count (0 == not in a section).
 */
static __inline int
crit_test(thread_t td)
{
	return(td->td_critcount);
}

/*
 * Return whether any threads are runnable.
 */
static __inline int
lwkt_runnable(void)
{
	return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
}

static __inline int
lwkt_getpri(thread_t td)
{
	return(td->td_pri);
}

static __inline int
lwkt_getpri_self(void)
{
	return(lwkt_getpri(curthread));
}

/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
static __inline void
lwkt_passive_recover(thread_t td)
{
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;
}

/*
 * cpusync support
 *
 * Initialize a cpusync request structure; cs_mack is deliberately not
 * initialized here (handled by the _interlock step, per the comment).
 */
static __inline void
lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
		  cpusync_func_t func, void *data)
{
	cs->cs_mask = mask;
	/* cs->cs_mack = 0; handled by _interlock */
	cs->cs_func = func;
	cs->cs_data = data;
}

#ifdef SMP

/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three arguments:
 * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
 * the trap frame is not known).  However, we wish to provide opaque
 * interfaces for simpler callbacks... the basic IPI messaging function as
 * used by the kernel takes a single argument.
 *
 * Each wrapper below simply casts the narrower callback type to
 * ipifunc3_t and forwards to the corresponding *_ipiq3* primitive,
 * padding the unused integer argument with 0 for the 1-arg forms.
 */
static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(cpumask_t mask, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(cpumask_t mask, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_nowait(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_nowait(globaldata_t target, ipifunc2_t func,
		       void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
			void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}

#endif	/* SMP */
#endif	/* _KERNEL */
#endif	/* _SYS_THREAD2_H_ */