/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 *
 * $DragonFly: src/sys/sys/thread2.h,v 1.28 2006/12/23 00:27:03 swildner Exp $
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#ifndef _MACHINE_CPUFUNC_H_
#include <machine/cpufunc.h>
#endif

/*
 * Critical section debugging
 */
#ifdef DEBUG_CRIT_SECTIONS

#define __DEBUG_CRIT_ARG__		const char *id
#define __DEBUG_CRIT_ADD_ARG__		, const char *id
#define __DEBUG_CRIT_PASS_ARG__		, id
#define __DEBUG_CRIT_ENTER(td)		_debug_crit_enter((td), id)
#define __DEBUG_CRIT_EXIT(td)		_debug_crit_exit((td), id)
#define crit_enter()			_crit_enter(__FUNCTION__)
#define crit_enter_id(id)		_crit_enter(id)
#define crit_enter_quick(curtd)		_crit_enter_quick((curtd), __FUNCTION__)
#define crit_enter_gd(curgd)		_crit_enter_gd(curgd, __FUNCTION__)
#define crit_exit()			_crit_exit(__FUNCTION__)
#define crit_exit_id(id)		_crit_exit(id)
#define crit_exit_quick(curtd)		_crit_exit_quick((curtd), __FUNCTION__)
#define crit_exit_noyield(curtd)	_crit_exit_noyield((curtd), __FUNCTION__)
#define crit_exit_gd(curgd)		_crit_exit_gd((curgd), __FUNCTION__)
#else
#define __DEBUG_CRIT_ARG__		void
#define __DEBUG_CRIT_ADD_ARG__
#define __DEBUG_CRIT_PASS_ARG__
#define __DEBUG_CRIT_ENTER(td)
#define __DEBUG_CRIT_EXIT(td)
#define crit_enter()			_crit_enter()
#define crit_enter_id(id)		_crit_enter()
#define crit_enter_quick(curtd)		_crit_enter_quick(curtd)
#define crit_enter_gd(curgd)		_crit_enter_gd(curgd)
#define crit_exit()			_crit_exit()
#define crit_exit_id(id)		_crit_exit()
#define crit_exit_quick(curtd)		_crit_exit_quick(curtd)
#define crit_exit_noyield(curtd)	_crit_exit_noyield(curtd)
#define crit_exit_gd(curgd)		_crit_exit_gd(curgd)
#endif

/*
 * Track crit_enter()/crit_exit() pairs and warn on mismatches.
 */
#ifdef DEBUG_CRIT_SECTIONS

static __inline void
_debug_crit_enter(thread_t td, const char *id)
{
    int wi = td->td_crit_debug_index;

    td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK] = id;
    ++td->td_crit_debug_index;
}

static __inline void
_debug_crit_exit(thread_t td, const char *id)
{
    const char *gid;
    int wi;

    wi = td->td_crit_debug_index - 1;
    if ((gid = td->td_crit_debug_array[wi & CRIT_DEBUG_ARRAY_MASK]) != id) {
	if (td->td_in_crit_report == 0) {
	    td->td_in_crit_report = 1;
	    kprintf("crit_exit(%s) expected id %s\n", id, gid);
	    td->td_in_crit_report = 0;
	}
    }
    --td->td_crit_debug_index;
}

#endif

/*
 * Critical sections prevent preemption by raising a thread's priority
 * above the highest possible interrupting priority.  Additionally, the
 * current cpu will not be able to schedule a new thread but will instead
 * place it on a pending list (with interrupts physically disabled) and
 * set mycpu->gd_reqflags to indicate that work needs to be done, which
 * splz_check() takes care of.
 *
 * Some of these routines take a struct thread pointer as an argument.
 * This pointer MUST be curthread and is only passed as an optimization.
 *
 * Synchronous switching and blocking are allowed while in a critical
 * section.
 */

static __inline void
_crit_enter(__DEBUG_CRIT_ARG__)
{
    struct thread *td = curthread;

#ifdef INVARIANTS
    if (td->td_pri < 0)
	crit_panic();
#endif
    td->td_pri += TDPRI_CRIT;
    __DEBUG_CRIT_ENTER(td);
    cpu_ccfence();
}

static __inline void
_crit_enter_quick(struct thread *curtd __DEBUG_CRIT_ADD_ARG__)
{
    curtd->td_pri += TDPRI_CRIT;
    __DEBUG_CRIT_ENTER(curtd);
    cpu_ccfence();
}

static __inline void
_crit_enter_gd(globaldata_t mygd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_enter_quick(mygd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline void
_crit_exit_noyield(struct thread *curtd __DEBUG_CRIT_ADD_ARG__)
{
    __DEBUG_CRIT_EXIT(curtd);
    curtd->td_pri -= TDPRI_CRIT;
#ifdef INVARIANTS
    if (curtd->td_pri < 0)
	crit_panic();
#endif
    cpu_ccfence();	/* prevent compiler reordering */
}

static __inline void
_crit_exit(__DEBUG_CRIT_ARG__)
{
    thread_t td = curthread;

    __DEBUG_CRIT_EXIT(td);
    td->td_pri -= TDPRI_CRIT;
#ifdef INVARIANTS
    if (td->td_pri < 0)
	crit_panic();
#endif
    cpu_ccfence();	/* prevent compiler reordering */
    if (td->td_gd->gd_reqflags && td->td_pri < TDPRI_CRIT)
	splz_check();
}

static __inline void
_crit_exit_quick(struct thread *curtd __DEBUG_CRIT_ADD_ARG__)
{
    globaldata_t gd = curtd->td_gd;

    __DEBUG_CRIT_EXIT(curtd);
    curtd->td_pri -= TDPRI_CRIT;
    cpu_ccfence();	/* prevent compiler reordering */
    if (gd->gd_reqflags && curtd->td_pri < TDPRI_CRIT)
	splz_check();
}

static __inline void
_crit_exit_gd(globaldata_t mygd __DEBUG_CRIT_ADD_ARG__)
{
    _crit_exit_quick(mygd->gd_curthread __DEBUG_CRIT_PASS_ARG__);
}

static __inline int
crit_test(thread_t td)
{
    return(td->td_pri >= TDPRI_CRIT);
}
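/*
 * Example (an illustrative sketch only, kept out of compilation with
 * #if 0): a typical crit_enter()/crit_exit() pairing bracketing per-cpu
 * access.  Only crit_enter(), crit_exit(), mycpu, and gd_cpuid come from
 * this header and globaldata; the function itself is hypothetical.
 */
#if 0
static __inline int
example_current_cpu_work(void)
{
    int cpuid;

    crit_enter();		/* raise td_pri; no preemption or migration */
    cpuid = mycpu->gd_cpuid;	/* per-cpu access is now safe */
    /* ... manipulate other per-cpu structures here ... */
    crit_exit();		/* may run splz_check() if work is pending */
    return(cpuid);
}
#endif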
/*
 * Initialize a tokref_t.  We only need to initialize the token pointer
 * and the state.  We do not have to initialize tr_next, tr_gdreqnext,
 * or tr_reqgd.
 */
static __inline void
lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok)
{
    ref->tr_tok = tok;
    ref->tr_state = 0;
}
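/*
 * Example (an illustrative sketch only, kept out of compilation with
 * #if 0): a stack-allocated tokref paired with a token.  This assumes
 * the two-argument lwkt_gettoken()/lwkt_reltoken() interfaces declared
 * in sys/thread.h in this tree, which initialize the tokref themselves
 * via lwkt_tokref_init(); the function name is hypothetical.
 */
#if 0
static __inline void
example_token_usage(lwkt_token_t tok)
{
    lwkt_tokref ref;

    lwkt_gettoken(&ref, tok);	/* initializes ref and acquires tok */
    /* ... data covered by tok is stable here ... */
    lwkt_reltoken(&ref);	/* ref must not be used after release */
}
#endif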
/*
 * Return whether any threads are runnable, whether they meet mp_lock
 * requirements or not.
 */
static __inline int
lwkt_runnable(void)
{
    return (mycpu->gd_runqmask != 0);
}

static __inline int
lwkt_getpri(thread_t td)
{
    return(td->td_pri & TDPRI_MASK);
}

static __inline int
lwkt_getpri_self(void)
{
    return(lwkt_getpri(curthread));
}

/*
 * Reduce our priority in preparation for a return to userland.  If
 * our passive release function was still in place, our priority was
 * never raised and does not need to be reduced.
 *
 * See also lwkt_passive_release() and platform/blah/trap.c
 */
static __inline void
lwkt_passive_recover(thread_t td)
{
    if (td->td_release == NULL)
	lwkt_setpri_self(TDPRI_USER_NORM);
    td->td_release = NULL;
}

#ifdef SMP

/*
 * IPIQ messaging wrappers.  IPIQ remote functions are passed three arguments:
 * a void * pointer, an integer, and a pointer to the trap frame (or NULL if
 * the trap frame is not known).  However, we wish to provide opaque
 * interfaces for simpler callbacks... the basic IPI messaging function as
 * used by the kernel takes a single argument.
 */
static __inline int
lwkt_send_ipiq(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2(globaldata_t target, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_mask(u_int32_t mask, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_mask(u_int32_t mask, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_mask(mask, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_nowait(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_nowait(globaldata_t target, ipifunc2_t func,
		       void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_nowait(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_passive(globaldata_t target, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_passive(globaldata_t target, ipifunc2_t func,
			void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_passive(target, (ipifunc3_t)func, arg1, arg2));
}

static __inline int
lwkt_send_ipiq_bycpu(int dcpu, ipifunc1_t func, void *arg)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg, 0));
}

static __inline int
lwkt_send_ipiq2_bycpu(int dcpu, ipifunc2_t func, void *arg1, int arg2)
{
    return(lwkt_send_ipiq3_bycpu(dcpu, (ipifunc3_t)func, arg1, arg2));
}

#endif	/* SMP */
#endif	/* _KERNEL */
#endif	/* _SYS_THREAD2_H_ */
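/*
 * Example (an illustrative sketch only, kept out of compilation with
 * #if 0): sending a single-argument function to another cpu through the
 * opaque lwkt_send_ipiq() wrapper above.  The remote function and its
 * argument are hypothetical; globaldata_find() maps a cpu id to its
 * globaldata, and lwkt_send_ipiq_bycpu() could be used to the same effect.
 */
#if 0
static void
example_remote_func(void *arg)
{
    /* runs on the target cpu from its IPI processing path */
}

static void
example_send(int dcpu, void *arg)
{
    lwkt_send_ipiq(globaldata_find(dcpu), example_remote_func, arg);
}
#endif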