/*
 * SYS/THREAD2.H
 *
 * Implements inline procedure support for the LWKT subsystem.
 *
 * Generally speaking these routines only operate on threads associated
 * with the current cpu.  For example, a higher priority thread pending
 * on a different cpu will not be immediately scheduled by a yield() on
 * this cpu.
 *
 * $DragonFly: src/sys/sys/thread2.h,v 1.17 2004/10/12 19:29:29 dillon Exp $
 */

#ifndef _SYS_THREAD2_H_
#define _SYS_THREAD2_H_

/*
 * Userland will have its own globaldata which it includes prior to this.
 */
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#ifndef _MACHINE_CPUFUNC_H_
#include <machine/cpufunc.h>
#endif
#endif

/*
 * Critical sections prevent preemption by raising a thread's priority
 * above the highest possible interrupting priority.  Additionally, the
 * current cpu will not be able to schedule a new thread but will instead
 * place it on a pending list (with interrupts physically disabled) and
 * set mycpu->gd_reqflags to indicate that work needs to be done, which
 * lwkt_yield_quick() takes care of.
 *
 * Some of these routines take a struct thread pointer as an argument.  This
 * pointer MUST be curthread and is only passed as an optimization.
 *
 * Synchronous switching and blocking is allowed while in a critical section.
 */

/*
 * Enter a critical section for the current thread by bumping td_pri by
 * TDPRI_CRIT.  Critical sections nest: each crit_enter() must be paired
 * with a crit_exit().
 */
static __inline void
crit_enter(void)
{
    struct thread *td = curthread;

    td->td_pri += TDPRI_CRIT;
#ifdef INVARIANTS
    /* A negative priority after the bump indicates corruption or
     * nesting-count wraparound; presumably why we panic here. */
    if (td->td_pri < 0)
	crit_panic();
#endif
}

/*
 * Same as crit_enter(), but the caller supplies curthread so we avoid
 * re-reading it (optimization only -- curtd MUST be curthread, see the
 * file comment above).  No INVARIANTS check on this fast path.
 */
static __inline void
crit_enter_quick(struct thread *curtd)
{
    curtd->td_pri += TDPRI_CRIT;
}

/*
 * Enter a critical section given the current cpu's globaldata (which
 * MUST be mycpu); resolves the current thread via gd_curthread.
 */
static __inline void
crit_enter_gd(globaldata_t mygd)
{
    crit_enter_quick(mygd->gd_curthread);
}

/*
 * Leave a critical section without checking gd_reqflags, i.e. without
 * giving pending scheduler work a chance to run.  curtd MUST be
 * curthread.
 */
static __inline void
crit_exit_noyield(struct thread *curtd)
{
    curtd->td_pri -= TDPRI_CRIT;
#ifdef INVARIANTS
    /* Going negative means more crit_exit()s than crit_enter()s. */
    if (curtd->td_pri < 0)
	crit_panic();
#endif
}

/*
 * Leave a critical section.  If this drops us out of the last nested
 * critical section (td_pri < TDPRI_CRIT) and the cpu has pending work
 * (gd_reqflags set), run lwkt_yield_quick() to process it.
 */
static __inline void
crit_exit(void)
{
    thread_t td = curthread;

    td->td_pri -= TDPRI_CRIT;
#ifdef INVARIANTS
    if (td->td_pri < 0)
	crit_panic();
#endif
    cpu_mb1();	/* must flush td_pri before checking gd_reqflags */
    if (td->td_gd->gd_reqflags && td->td_pri < TDPRI_CRIT)
	lwkt_yield_quick();
}

/*
 * Same as crit_exit() but the caller supplies curthread (optimization
 * only) and no INVARIANTS underflow check is made.
 */
static __inline void
crit_exit_quick(struct thread *curtd)
{
    globaldata_t gd = curtd->td_gd;

    curtd->td_pri -= TDPRI_CRIT;
    cpu_mb1();	/* must flush td_pri before checking gd_reqflags */
    if (gd->gd_reqflags && curtd->td_pri < TDPRI_CRIT)
	lwkt_yield_quick();
}

/*
 * Leave a critical section given the current cpu's globaldata (which
 * MUST be mycpu).
 */
static __inline void
crit_exit_gd(globaldata_t mygd)
{
    crit_exit_quick(mygd->gd_curthread);
}

/*
 * Strip all critical-section nesting from the current thread, returning
 * the previous td_pri so it can be restored with crit_panic_restore().
 * Presumably used by panic/debugger paths -- TODO confirm callers.
 */
static __inline int
crit_panic_save(void)
{
    thread_t td = curthread;
    int pri = td->td_pri;
    /* keep only the base priority bits, dropping crit nesting */
    td->td_pri = td->td_pri & TDPRI_MASK;
    return(pri);
}

/*
 * Restore the td_pri value previously saved by crit_panic_save().
 */
static __inline void
crit_panic_restore(int cpri)
{
    curthread->td_pri = cpri;
}

/*
 * Return non-zero if the thread is in a critical section (td_pri has at
 * least one TDPRI_CRIT increment applied).
 */
static __inline int
crit_test(thread_t td)
{
    return(td->td_pri >= TDPRI_CRIT);
}

/*
 * Initialize a tokref_t.  We only need to initialize the token pointer
 * and the magic number.  We do not have to initialize tr_next, tr_gdreqnext,
 * or tr_reqgd.
 */
static __inline void
lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok)
{
    ref->tr_magic = LWKT_TOKREF_MAGIC1;
    ref->tr_tok = tok;
}

/*
 * Return whether any threads are runnable, whether they meet mp_lock
 * requirements or not.
 */
static __inline int
lwkt_runnable(void)
{
    return (mycpu->gd_runqmask != 0);
}

/*
 * Return the thread's base priority with the critical-section nesting
 * bits masked off.
 */
static __inline int
lwkt_getpri(thread_t td)
{
    return(td->td_pri & TDPRI_MASK);
}

/*
 * Convenience wrapper: base priority of the current thread.
 */
static __inline int
lwkt_getpri_self(void)
{
    return(lwkt_getpri(curthread));
}

#endif