/*
 * SYS/MPLOCK2.H
 *
 * Implement the MP lock.  Note that debug operations (recording the
 * file and line of the current lock holder) are folded directly into
 * the inline lock functions via the *_debug() wrappers below.
 */
#ifndef _SYS_MPLOCK2_H_
#define _SYS_MPLOCK2_H_

#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif

#ifdef SMP

#define get_mplock()		get_mplock_debug(__FILE__, __LINE__)
#define try_mplock()		try_mplock_debug(__FILE__, __LINE__)
#define cpu_try_mplock()	cpu_try_mplock_debug(__FILE__, __LINE__)

void _get_mplock_contested(const char *file, int line);
void _try_mplock_contested(const char *file, int line);
void _cpu_try_mplock_contested(const char *file, int line);
void _rel_mplock_contested(void);
void cpu_get_initial_mplock(void);
void cpu_mplock_contested(void);
void yield_mplock(struct thread *td);

extern int mp_lock;
extern int mp_lock_contention_mask;
extern const char *mp_lock_holder_file;
extern int mp_lock_holder_line;

/*
 * Acquire the MP lock, blocking until we get it.
 *
 * In order to acquire the MP lock we must first pre-dispose td_mpcount
 * for the acquisition and then get the actual lock.
 *
 * The contested function is called only if we do not have or are unable
 * to acquire the actual lock.  It will not return until the lock has
 * been acquired.
 */
static __inline
void
get_mplock_debug(const char *file, int line)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	++td->td_mpcount;
	if (mp_lock != gd->gd_cpuid) {
		if (atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid) == 0)
			_get_mplock_contested(file, line);
#ifdef INVARIANTS
		mp_lock_holder_file = file;
		mp_lock_holder_line = line;
#endif
	}
}

/*
 * Release the MP lock.
 *
 * In order to release the MP lock we must first pre-dispose td_mpcount
 * for the release and then, if it is 0, release the actual lock.
 *
 * The contested function is called only if we are unable to release the
 * actual lock.  This can occur if we raced an interrupt after decrementing
 * td_mpcount to 0 and the interrupt acquired and released the lock.
 *
 * The function also catches the td_mpcount underflow case because the
 * lock will be in a released state and thus fail the subsequent release.
 */
static __inline
void
rel_mplock(void)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	int n;

	n = --td->td_mpcount;
	if (n <= 0 && atomic_cmpset_int(&mp_lock, gd->gd_cpuid, -1) == 0)
		_rel_mplock_contested();
}

/*
 * Attempt to acquire the MP lock, returning 0 on failure and 1 on success.
 *
 * The contested function is called on failure and typically serves simply
 * to log the attempt (if debugging is enabled).
 */
static __inline
int
try_mplock_debug(const char *file, int line)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	++td->td_mpcount;
	if (mp_lock != gd->gd_cpuid &&
	    atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid) == 0) {
		_try_mplock_contested(file, line);
		return(0);
	}
#ifdef INVARIANTS
	mp_lock_holder_file = file;
	mp_lock_holder_line = line;
#endif
	return(1);
}

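/*
 * Usage sketch (illustrative only, not part of the original header):
 * get_mplock()/rel_mplock() nest per-thread via td_mpcount, while
 * try_mplock() is the non-blocking variant.  On a failed try the lock
 * is not held and rel_mplock() must not be called.
 *
 *	get_mplock();		(blocks until acquired; calls may nest)
 *	... access MP-unsafe structures ...
 *	rel_mplock();
 *
 *	if (try_mplock()) {
 *		... lock held ...
 *		rel_mplock();
 *	}
 */
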
/*
 * Low level acquisition of the MP lock ignoring curthread->td_mpcount
 *
 * This version of try_mplock() is used when the caller has already
 * predisposed td->td_mpcount.
 *
 * Returns non-zero on success, 0 on failure.
 *
 * WARNING: Must be called from within a critical section if td_mpcount is
 *	    zero, otherwise an interrupt race can cause the lock to be lost.
 */
static __inline
int
cpu_try_mplock_debug(const char *file, int line)
{
	globaldata_t gd = mycpu;

	if (mp_lock != gd->gd_cpuid &&
	    atomic_cmpset_int(&mp_lock, -1, gd->gd_cpuid) == 0) {
		_cpu_try_mplock_contested(file, line);
		return(0);
	}
#ifdef INVARIANTS
	mp_lock_holder_file = file;
	mp_lock_holder_line = line;
#endif
	return(1);
}

/*
 * A cpu wanted the MP lock but could not get it.  This function is also
 * called directly from the LWKT scheduler.
 *
 * Reentrant, may be called even if the cpu is already contending the MP
 * lock.
 */
static __inline
void
set_mplock_contention_mask(globaldata_t gd)
{
	atomic_set_int(&mp_lock_contention_mask, gd->gd_cpumask);
}

/*
 * A cpu is no longer contending for the MP lock after previously
 * contending for it.
 *
 * Reentrant, may be called even if the cpu was not previously contending
 * the MP lock.
 */
static __inline
void
clr_mplock_contention_mask(globaldata_t gd)
{
	atomic_clear_int(&mp_lock_contention_mask, gd->gd_cpumask);
}

/*
 * Return the cpuid of the current MP lock owner, or -1 if the lock
 * is not held.
 */
static __inline
int
owner_mplock(void)
{
	return (mp_lock);
}

/*
 * Low level release of the MP lock ignoring curthread->td_mpcount
 *
 * WARNING: Caller must be in a critical section, otherwise the
 *	    mp_lock can be lost from an interrupt race and we would
 *	    end up clearing someone else's lock.
 */
static __inline void
cpu_rel_mplock(void)
{
	mp_lock = -1;
}

#define MP_LOCK_HELD()		\
	(mp_lock == mycpu->gd_cpuid)
#define ASSERT_MP_LOCK_HELD(td)	\
	KASSERT(MP_LOCK_HELD(), ("MP_LOCK_HELD: Not held thread %p", td))

#else

/*
 * UNI-PROCESSOR BUILD - Degenerate case macros
 */
#define	get_mplock()
#define	rel_mplock()
#define	try_mplock()		1
#define	owner_mplock()		0
#define MP_LOCK_HELD()		(!0)
#define ASSERT_MP_LOCK_HELD(td)

#endif	/* SMP */

#endif	/* !_SYS_MPLOCK2_H_ */

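/*
 * Low-level usage sketch (illustrative only): cpu_try_mplock() and
 * cpu_rel_mplock() do not touch td_mpcount, so per the WARNINGs above
 * the caller is expected to close the interrupt race with a critical
 * section (assuming the usual crit_enter()/crit_exit() API) when
 * td_mpcount is zero:
 *
 *	crit_enter();
 *	if (cpu_try_mplock()) {
 *		... short MP-protected operation ...
 *		cpu_rel_mplock();
 *	}
 *	crit_exit();
 */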