/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/cpufunc.h>
#include <machine/cpumask.h>

/*
 * Is a token held either by the specified thread or held shared?
 *
 * We can't inexpensively validate the thread for a shared token
 * without iterating td->td_toks, so this isn't a perfect test.
 */
static __inline int
_lwkt_token_held_any(lwkt_token_t tok, thread_t td)
{
	long count = tok->t_count;

	cpu_ccfence();
	/* Exclusive hold by td: t_ref points into td's token reference array */
	if (tok->t_ref >= &td->td_toks_base && tok->t_ref < td->td_toks_stop)
		return TRUE;
	/*
	 * Shared hold by someone: not exclusively held and at least one
	 * shared-count bit is set.  We cannot cheaply tell whether td is
	 * among the shared holders (see comment above).
	 */
	if ((count & TOK_EXCLUSIVE) == 0 &&
	    (count & ~(TOK_EXCLUSIVE|TOK_EXCLREQ))) {
		return TRUE;
	}
	return FALSE;
}

/*
 * Is a token held exclusively by the specified thread?
 * (t_ref pointing into td's token reference array indicates td's hold)
 */
static __inline int
_lwkt_token_held_excl(lwkt_token_t tok, thread_t td)
{
	return ((tok->t_ref >= &td->td_toks_base &&
		 tok->t_ref < td->td_toks_stop));
}

/*
 * Critical section debugging.  When DEBUG_CRIT_SECTIONS is enabled an
 * identifier string (caller name) is threaded through the enter/exit
 * paths so mismatched pairs can be reported.
 */
#ifdef DEBUG_CRIT_SECTIONS
#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(mycpu, __func__)
#define crit_enter_id(id)		_crit_enter(mycpu, id)
#define crit_enter_gd(curgd)		_crit_enter((curgd), __func__)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __func__)
#define crit_enter_hard()		_crit_enter_hard(mycpu, __func__)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd), __func__)
#define crit_exit()			_crit_exit(mycpu, __func__)
#define crit_exit_id(id)		_crit_exit(mycpu, id)
#define crit_exit_gd(curgd)		_crit_exit((curgd), __func__)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __func__)
#define crit_exit_hard()		_crit_exit_hard(mycpu, __func__)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd), __func__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd),__func__)
#else
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter(mycpu)
#define crit_enter_id(id)		_crit_enter(mycpu)
#define crit_enter_gd(curgd)		_crit_enter((curgd))
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd))
#define crit_enter_hard()		_crit_enter_hard(mycpu)
#define crit_enter_hard_gd(curgd)	_crit_enter_hard((curgd))
#define crit_exit()			crit_exit_wrapper()
#define crit_exit_id(id)		_crit_exit(mycpu)
#define crit_exit_gd(curgd)		_crit_exit((curgd))
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd))
#define crit_exit_hard()		_crit_exit_hard(mycpu)
#define crit_exit_hard_gd(curgd)	_crit_exit_hard((curgd))
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd))
#endif

extern void crit_exit_wrapper(__DEBUG_CRIT_ARG__);

/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

/*
 * Push the caller id onto the thread's circular debug array
 * (CRIT_DEBUG_ARRAY_MASK wraps the index).
 */
static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
	int wi = td->td_crit_debug_index;

	td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
	++td->td_crit_debug_index;
}

/*
 * Pop the most recent id and warn if it does not match the exiting
 * caller's id.  td_in_crit_report guards against recursive reporting
 * while kprintf() runs.
 */
static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
	const char *gid;
	int wi;

	wi = td->td_crit_debug_index - 1;
	if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
		if (td->td_in_crit_report == 0) {
			td->td_in_crit_report = 1;
			kprintf("crit_exit(%s) expected id %s\n", id, gid);
			td->td_in_crit_report = 0;
		}
	}
	--td->td_crit_debug_index;
}

#endif

/*
 * Critical sections prevent preemption, but allow explicit blocking
 * and thread switching.  Any interrupt occurring while in a critical
 * section is made pending and returns immediately.  Interrupts are not
 * physically disabled.
 *
 * Hard critical sections prevent preemption and disallow any blocking
 * or thread switching, and in addition will assert on any blockable
 * operation (acquire token not already held, lockmgr, mutex ops, or
 * splz).  Spinlocks can still be used in hard sections.
 *
 * All critical section routines only operate on the current thread.
 * Passed gd or td arguments are simply optimizations when mycpu or
 * curthread is already available to the caller.
 */

/*
 * crit_enter
 */
static __inline void
_crit_enter_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	++td->td_critcount;
	__DEBUG_CRIT_ENTER(td);
	cpu_ccfence();	/* prevent compiler reordering out of the section */
}

static __inline void
_crit_enter(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

/*
 * Hard variant additionally bumps the cpu's interrupt nesting level
 * (see the hard critical section description above).
 */
static __inline void
_crit_enter_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_enter_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
	++gd->gd_intr_nesting_level;
}


/*
 * crit_exit*()
 *
 * NOTE: Conditionalizing just gd_reqflags, a case which is virtually
 *	 never true regardless of crit_count, should result in 100%
 *	 optimal code execution.  We don't check crit_count because
 *	 it just bloats the inline and does not improve performance.
 *
 * NOTE: This can produce a considerable amount of code despite the
 *	 relatively few lines of code so the non-debug case typically
 *	 just wraps it in a real function, crit_exit_wrapper().
 */
static __inline void
_crit_exit_noyield(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	__DEBUG_CRIT_EXIT(td);
	--td->td_critcount;
#ifdef INVARIANTS
	/* underflow means an unmatched crit_exit() */
	if (__predict_false(td->td_critcount < 0))
		crit_panic();
#endif
	cpu_ccfence();	/* prevent compiler reordering */
}

static __inline void
_crit_exit_quick(thread_t td __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_noyield(td __DEBUG_CRIT_PASS_ARG__);
	/* service pending interrupts/reschedules that accrued in-section */
	if (__predict_false(td->td_gd->gd_reqflags & RQF_IDLECHECK_MASK))
		lwkt_maybe_splz(td);
}

static __inline void
_crit_exit(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_exit_hard(globaldata_t gd __DEBUG_CRIT_ADD_ARG__)
{
	--gd->gd_intr_nesting_level;
	_crit_exit_quick(gd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

/*
 * Return the thread's critical section nesting count (0 if not in a
 * critical section).
 */
static __inline int
crit_test(thread_t td)
{
	return(td->td_critcount);
}

/*
 * Return whether any threads are runnable.
 */
static __inline int
lwkt_runnable(void)
{
	return (TAILQ_FIRST(&mycpu->gd_tdrunq) != NULL);
}

static __inline int
lwkt_getpri(thread_t td)
{
	return(td->td_pri);
}

static __inline int
lwkt_getpri_self(void)
{
	return(lwkt_getpri(curthread));
}

/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
static __inline void
lwkt_passive_recover(thread_t td)
{
#ifndef NO_LWKT_SPLIT_USERPRI
	if (td->td_release == NULL)
		lwkt_setpri_self(TDPRI_USER_NORM);
	td->td_release = NULL;
#endif
}

/*
 * cpusync support
 */
static __inline void
lwkt_cpusync_init(lwkt_cpusync_t cs, cpumask_t mask,
		  cpusync_func_t func, void *data)
{
	cs->cs_mask = mask;
	/* cs->cs_mack = 0; handled by _interlock */
	cs->cs_func = func;
	cs->cs_data = data;
}

/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three arguments:
 * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
 * the trap frame is not known).  However, we wish to provide opaque
 * interfaces for simpler callbacks... the basic IPI messaging function as
 * used by the kernel takes a single argument.
 */
static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(cpumask_t mask, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(cpumask_t mask, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
			void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
	return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}

/*
 * Return non-zero if the cpu has pending IPI work: either an incoming
 * IPI is flagged in gd_ipimask or the cpusync queue's ring indices
 * indicate unprocessed entries.
 */
static __inline int
lwkt_need_ipiq_process(globaldata_t gd)
{
	lwkt_ipiq_t ipiq;

	if (CPUMASK_TESTNZERO(gd->gd_ipimask))
		return 1;

	ipiq = &gd->gd_cpusyncq;
	return (ipiq->ip_rindex != ipiq->ip_windex);
}

#endif	/* _KERNEL */
#endif	/* _SYS_THREAD2_H_ */