/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/cpufunc.h>

/*
 * Is a token held by the specified thread?
 */
static __inline int
_lwkt_token_held(lwkt_token_t tok, thread_t td)
{
	return (tok->t_ref >= &td->td_toks_base &&
		tok->t_ref < td->td_toks_stop);
}

/*
 * Critical section debugging
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(mycpu, __FUNCTION__)
#define crit_enter_id(id)		_crit_enter(mycpu, id)
#define crit_enter_gd(curgd)		_crit_enter((curgd), __FUNCTION__)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __FUNCTION__)
#define crit_enter_hard()		_crit_enter_hard(mycpu, __FUNCTION__)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd), __FUNCTION__)
#define crit_exit()			_crit_exit(mycpu, __FUNCTION__)
#define crit_exit_id(id)		_crit_exit(mycpu, id)
#define crit_exit_gd(curgd)		_crit_exit((curgd), __FUNCTION__)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __FUNCTION__)
#define crit_exit_hard()		_crit_exit_hard(mycpu, __FUNCTION__)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd), __FUNCTION__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd), __FUNCTION__)
#else
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter(mycpu)
#define crit_enter_id(id)		_crit_enter(mycpu)
#define crit_enter_gd(curgd)		_crit_enter((curgd))
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd))
#define crit_enter_hard()		_crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd))
#define crit_exit()			crit_exit_wrapper()
#define crit_exit_id(id)		_crit_exit(mycpu)
#define crit_exit_gd(curgd)		_crit_exit((curgd))
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd))
#define crit_exit_hard()		_crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd))
#endif

extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);
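/*
 * Illustrative sketch (not part of the original header): when
 * DEBUG_CRIT_SECTIONS is defined, each crit_enter() variant records an
 * id string and each crit_exit() variant checks it, so a mismatched
 * pairing such as
 *
 *	crit_enter_id("foo");
 *	crit_exit_id("bar");
 *
 * is reported via kprintf() by _debug_crit_exit() below.  The ids
 * "foo" and "bar" are hypothetical.
 */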
/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
	int wi = td->td_crit_debug_index;

	td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
	++td->td_crit_debug_index;
}

static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
	const char *gid;
	int wi;

	wi = td->td_crit_debug_index - 1;
	if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
		if (td->td_in_crit_report == 0) {
			td->td_in_crit_report = 1;
			kprintf("crit_exit(%s) expected id %s\n", id, gid);
			td->td_in_crit_report = 0;
		}
	}
	--td->td_crit_debug_index;
}

#endif

/*
 * Critical sections prevent preemption but allow explicit blocking and
 * thread switching.  Any interrupt occurring while in a critical section
 * is made pending and returns immediately.  Interrupts are not
 * physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquiring a token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * The passed gd or td arguments are simply optimizations for when mycpu
 * or curthread is already available to the caller.
 */

/*
 * crit_enter
 */
static __inline void
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	++td->td_critcount;
	__DEBUG_CRIT_ENTER(td);
	cpu_ccfence();
}

static __inline void
_crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
	++gd->gd_intr_nesting_level;
}
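/*
 * Usage sketch (illustrative, not part of the original header).
 * A critical section brackets per-cpu work so it cannot be preempted;
 * the _gd variants avoid re-fetching mycpu when the caller already
 * holds a globaldata pointer.  do_percpu_work() is a hypothetical
 * helper.
 *
 *	static void
 *	example(void)
 *	{
 *		globaldata_t gd = mycpu;
 *
 *		crit_enter_gd(gd);
 *		do_percpu_work(gd);
 *		crit_exit_gd(gd);
 *	}
 */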
/*
 * crit_exit*()
 *
 * NOTE: Conditionalizing just gd_reqflags, a case which is virtually
 *	 never true regardless of crit_count, should result in 100%
 *	 optimal code execution.  We don't check crit_count because
 *	 it just bloats the inline and does not improve performance.
 *
 * NOTE: These inlines can expand to a considerable amount of code
 *	 despite the relatively few source lines, so the non-debug case
 *	 typically just wraps crit_exit() in a real function,
 *	 crit_exit_wrapper().
 */
static __inline void
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	__DEBUG_CRIT_EXIT(td);
	--td->td_critcount;
#ifdef INVARIANTS
	if (__predict_false(td->td_critcount < 0))
		crit_panic();
#endif
	cpu_ccfence();	/* prevent compiler reordering */
}

static __inline void
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
	if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
		lwkt_maybe_splz(td);
}

static __inline void
_crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	--gd->gd_intr_nesting_level;
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline int
crit_test(thread_t td)
{
	return(td->td_critcount);
}

/*
 * Return whether any threads are runnable.
 */
static __inline int
lwkt_runnable(void)
{
	return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
}

static __inline int
lwkt_getpri(thread_t td)
{
	return(td->td_pri);
}

static __inline int
lwkt_getpri_self(void)
{
	return(lwkt_getpri(curthread));
}

/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
static __inline void
lwkt_passive_recover(thread_t td)
{
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;
}

/*
 * cpusync support
 */
static __inline void
lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
		  cpusync_func_t func, void *data)
{
	cs->cs_mask = mask;
	/* cs->cs_mack = 0; handled by _interlock */
	cs->cs_func = func;
	cs->cs_data = data;
}
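/*
 * Usage sketch (illustrative, not part of the original header):
 * initialize a cpusync and run it with the interlock/deinterlock
 * pair declared elsewhere in the LWKT headers, which is assumed here
 * to execute the callback on every cpu in the mask.  do_sync_work()
 * is a hypothetical callback.
 *
 *	struct lwkt_cpusync cs;
 *
 *	lwkt_cpusync_init(&cs, smp_active_mask, do_sync_work, NULL);
 *	lwkt_cpusync_interlock(&cs);
 *	lwkt_cpusync_deinterlock(&cs);
 */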
#ifdef SMP

/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three
 * arguments: a void * pointer, an integer, and a pointer to the trap
 * frame (or NULL if the trap frame is not known).  However, we wish to
 * provide opaque interfaces for simpler callbacks... the basic IPI
 * messaging function as used by the kernel takes a single argument.
 */
static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(cpumask_t mask, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(cpumask_t mask, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_nowait(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_nowait(globaldata_t target, ipifunc2_t func,
		       void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
			void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}

#endif	/* SMP */
#endif	/* _KERNEL */
#endif	/* _SYS_THREAD2_H_ */