/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_SYS_SX_H_
#define	_SYS_SX_H_

#include <sys/_lock.h>
#include <sys/_sx.h>

#ifdef	_KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#endif

/*
 * In general, the sx locks and rwlocks use very similar algorithms.
 * The main difference in the implementations is how threads are
 * blocked when a lock is unavailable.  For this, sx locks use sleep
 * queues which do not support priority propagation, and rwlocks use
 * turnstiles which do.
 *
 * The sx_lock field consists of several fields.  The low bit
 * indicates if the lock is locked with a shared or exclusive lock.  A
 * value of 0 indicates an exclusive lock, and a value of 1 indicates
 * a shared lock.  Bit 1 is a boolean indicating if there are any
 * threads waiting for a shared lock.  Bit 2 is a boolean indicating
 * if there are any threads waiting for an exclusive lock.  Bit 3 is a
 * boolean indicating if an exclusive lock is recursively held.  The
 * rest of the variable's definition is dependent on the value of the
 * first bit.  For an exclusive lock, it is a pointer to the thread
 * holding the lock, similar to the mtx_lock field of mutexes.  For
 * shared locks, it is a count of read locks that are held.
 *
 * When the lock is not locked by any thread, it is encoded as a
 * shared lock with zero waiters.
 */

#define	SX_LOCK_SHARED			0x01
#define	SX_LOCK_SHARED_WAITERS		0x02
#define	SX_LOCK_EXCLUSIVE_WAITERS	0x04
#define	SX_LOCK_RECURSED		0x08
#define	SX_LOCK_FLAGMASK						\
	(SX_LOCK_SHARED | SX_LOCK_SHARED_WAITERS |			\
	SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_RECURSED)

#define	SX_OWNER(x)			((x) & ~SX_LOCK_FLAGMASK)
#define	SX_SHARERS_SHIFT		4
#define	SX_SHARERS(x)			(SX_OWNER(x) >> SX_SHARERS_SHIFT)
#define	SX_SHARERS_LOCK(x)						\
	((x) << SX_SHARERS_SHIFT | SX_LOCK_SHARED)
#define	SX_ONE_SHARER			(1 << SX_SHARERS_SHIFT)

#define	SX_LOCK_UNLOCKED		SX_SHARERS_LOCK(0)
#define	SX_LOCK_DESTROYED						\
	(SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)
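
/*
 * Worked example of the encoding above (an illustrative comment added
 * here, not part of the original interface): a lock read-locked by three
 * threads while one or more threads wait for an exclusive lock is stored
 * as
 *
 *	SX_SHARERS_LOCK(3) | SX_LOCK_EXCLUSIVE_WAITERS == 0x35
 *
 * SX_SHARERS(0x35) then recovers the sharer count of 3, and SX_OWNER()
 * masks the flag bits back off.
 */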

#ifdef _KERNEL

#define	sx_recurse	lock_object.lo_data

/*
 * Function prototypes.  Routines that start with an underscore are not part
 * of the public interface and are wrapped with a macro.
 */
void	sx_sysinit(void *arg);
#define	sx_init(sx, desc)	sx_init_flags((sx), (desc), 0)
void	sx_init_flags(struct sx *sx, const char *description, int opts);
void	sx_destroy(struct sx *sx);
int	sx_try_slock_(struct sx *sx, const char *file, int line);
int	sx_try_xlock_(struct sx *sx, const char *file, int line);
int	sx_try_upgrade_(struct sx *sx, const char *file, int line);
void	sx_downgrade_(struct sx *sx, const char *file, int line);
int	_sx_slock(struct sx *sx, int opts, const char *file, int line);
int	_sx_xlock(struct sx *sx, int opts, const char *file, int line);
void	_sx_sunlock(struct sx *sx, const char *file, int line);
void	_sx_xunlock(struct sx *sx, const char *file, int line);
int	_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts,
	    const char *file, int line);
int	_sx_slock_hard(struct sx *sx, int opts, const char *file, int line);
void	_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file,
	    int line);
void	_sx_sunlock_hard(struct sx *sx, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	_sx_assert(const struct sx *sx, int what, const char *file, int line);
#endif
#ifdef DDB
int	sx_chain(struct thread *td, struct thread **ownerp);
#endif

struct sx_args {
	struct sx	*sa_sx;
	const char	*sa_desc;
	int		sa_flags;
};

#define	SX_SYSINIT_FLAGS(name, sxa, desc, flags)			\
	static struct sx_args name##_args = {				\
		(sxa),							\
		(desc),							\
		(flags)							\
	};								\
	SYSINIT(name##_sx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    sx_sysinit, &name##_args);					\
	SYSUNINIT(name##_sx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    sx_destroy, (sxa))

#define	SX_SYSINIT(name, sxa, desc)	SX_SYSINIT_FLAGS(name, sxa, desc, 0)
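
/*
 * Usage sketch for the sysinit hooks above (illustrative only, not part
 * of the original header; foo_sx and foo_sx_init are hypothetical names):
 *
 *	static struct sx foo_sx;
 *	SX_SYSINIT(foo_sx_init, &foo_sx, "foo lock");
 *
 * This registers sx_sysinit() to initialize the lock at SI_SUB_LOCK time
 * and sx_destroy() to tear it down on shutdown or unload.
 */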

/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels.  If the lock can't be acquired or released trivially then
 * the work is deferred to 'tougher' functions.
 */

/* Acquire an exclusive lock. */
static __inline int
__sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
    int line)
{
	uintptr_t tid = (uintptr_t)td;
	int error = 0;

	if (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
		error = _sx_xlock_hard(sx, tid, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_WRITER);

	return (error);
}

/* Release an exclusive lock. */
static __inline void
__sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
{
	uintptr_t tid = (uintptr_t)td;

	if (sx->sx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx,
		    LOCKSTAT_WRITER);
	if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		_sx_xunlock_hard(sx, tid, file, line);
}

/* Acquire a shared lock. */
static __inline int
__sx_slock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t x = sx->sx_lock;
	int error = 0;

	if (!(x & SX_LOCK_SHARED) ||
	    !atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER))
		error = _sx_slock_hard(sx, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_READER);

	return (error);
}

/*
 * Release a shared lock.  We can just drop a single shared lock so
 * long as we aren't trying to drop the last shared lock when other
 * threads are waiting for an exclusive lock.  This takes advantage of
 * the fact that an unlocked lock is encoded as a shared lock with a
 * count of 0.
 */
static __inline void
__sx_sunlock(struct sx *sx, const char *file, int line)
{
	uintptr_t x = sx->sx_lock;

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
	if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
	    !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
		_sx_sunlock_hard(sx, file, line);
}

/*
 * Public interface for lock operations.
 */
#ifndef	LOCK_DEBUG
#error	"LOCK_DEBUG not defined, include <sys/lock.h> before <sys/sx.h>"
#endif
#if	(LOCK_DEBUG > 0) || defined(SX_NOINLINE)
#define	sx_xlock_(sx, file, line)					\
	(void)_sx_xlock((sx), 0, (file), (line))
#define	sx_xlock_sig_(sx, file, line)					\
	_sx_xlock((sx), SX_INTERRUPTIBLE, (file), (line))
#define	sx_xunlock_(sx, file, line)					\
	_sx_xunlock((sx), (file), (line))
#define	sx_slock_(sx, file, line)					\
	(void)_sx_slock((sx), 0, (file), (line))
#define	sx_slock_sig_(sx, file, line)					\
	_sx_slock((sx), SX_INTERRUPTIBLE, (file), (line))
#define	sx_sunlock_(sx, file, line)					\
	_sx_sunlock((sx), (file), (line))
#else
#define	sx_xlock_(sx, file, line)					\
	(void)__sx_xlock((sx), curthread, 0, (file), (line))
#define	sx_xlock_sig_(sx, file, line)					\
	__sx_xlock((sx), curthread, SX_INTERRUPTIBLE, (file), (line))
#define	sx_xunlock_(sx, file, line)					\
	__sx_xunlock((sx), curthread, (file), (line))
#define	sx_slock_(sx, file, line)					\
	(void)__sx_slock((sx), 0, (file), (line))
#define	sx_slock_sig_(sx, file, line)					\
	__sx_slock((sx), SX_INTERRUPTIBLE, (file), (line))
#define	sx_sunlock_(sx, file, line)					\
	__sx_sunlock((sx), (file), (line))
#endif	/* LOCK_DEBUG > 0 || SX_NOINLINE */
#define	sx_try_slock(sx)	sx_try_slock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_try_xlock(sx)	sx_try_xlock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_try_upgrade(sx)	sx_try_upgrade_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_downgrade(sx)	sx_downgrade_((sx), LOCK_FILE, LOCK_LINE)
#ifdef	INVARIANTS
#define	sx_assert_(sx, what, file, line)				\
	_sx_assert((sx), (what), (file), (line))
#else
#define	sx_assert_(sx, what, file, line)	(void)0
#endif

#define	sx_xlock(sx)		sx_xlock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_xlock_sig(sx)	sx_xlock_sig_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_xunlock(sx)		sx_xunlock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_slock(sx)		sx_slock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_slock_sig(sx)	sx_slock_sig_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_sunlock(sx)		sx_sunlock_((sx), LOCK_FILE, LOCK_LINE)
#define	sx_assert(sx, what)	sx_assert_((sx), (what), __FILE__, __LINE__)
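
/*
 * Usage sketch for the public interface above (illustrative only, not
 * part of the original header; foo_sx is a hypothetical lock):
 *
 *	sx_init(&foo_sx, "foo lock");
 *
 *	sx_slock(&foo_sx);		read the protected data
 *	sx_sunlock(&foo_sx);
 *
 *	sx_xlock(&foo_sx);		modify the protected data
 *	sx_assert(&foo_sx, SA_XLOCKED);
 *	sx_xunlock(&foo_sx);
 *
 *	error = sx_xlock_sig(&foo_sx);	may fail if interrupted by a signal
 *	if (error != 0)
 *		return (error);
 */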

/*
 * Return a pointer to the owning thread if the lock is exclusively
 * locked.
 */
#define	sx_xholder(sx)							\
	((sx)->sx_lock & SX_LOCK_SHARED ? NULL :			\
	(struct thread *)SX_OWNER((sx)->sx_lock))

#define	sx_xlocked(sx)							\
	(((sx)->sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)) ==	\
	    (uintptr_t)curthread)

#define	sx_unlock_(sx, file, line) do {					\
	if (sx_xlocked(sx))						\
		sx_xunlock_(sx, file, line);				\
	else								\
		sx_sunlock_(sx, file, line);				\
} while (0)

#define	sx_unlock(sx)	sx_unlock_((sx), LOCK_FILE, LOCK_LINE)

#define	sx_sleep(chan, sx, pri, wmesg, timo)				\
	_sleep((chan), &(sx)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)

/*
 * Options passed to sx_init_flags().
 */
#define	SX_DUPOK		0x01
#define	SX_NOPROFILE		0x02
#define	SX_NOWITNESS		0x04
#define	SX_QUIET		0x08
#define	SX_NOADAPTIVE		0x10
#define	SX_RECURSE		0x20
#define	SX_NEW			0x40

/*
 * Options passed to sx_*lock_hard().
 */
#define	SX_INTERRUPTIBLE	0x40

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	SA_LOCKED		LA_LOCKED
#define	SA_SLOCKED		LA_SLOCKED
#define	SA_XLOCKED		LA_XLOCKED
#define	SA_UNLOCKED		LA_UNLOCKED
#define	SA_RECURSED		LA_RECURSED
#define	SA_NOTRECURSED		LA_NOTRECURSED

/* Backwards compatibility. */
#define	SX_LOCKED		LA_LOCKED
#define	SX_SLOCKED		LA_SLOCKED
#define	SX_XLOCKED		LA_XLOCKED
#define	SX_UNLOCKED		LA_UNLOCKED
#define	SX_RECURSED		LA_RECURSED
#define	SX_NOTRECURSED		LA_NOTRECURSED
#endif

#endif /* _KERNEL */

#endif /* !_SYS_SX_H_ */