/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_SYS_MUTEX2_H_
#define	_SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t mtx)
{
	mtx->mtx_lock = 0;
	mtx->mtx_refs = 0;
	mtx->mtx_owner = NULL;
	mtx->mtx_link = NULL;
}

/*
 * Initialize a mutex link structure, placing it in the idle state.
 */
static __inline void
mtx_link_init(mtx_link_t link)
{
	link->state = MTX_LINK_IDLE;
}

/*
 * Deinitialize a mutex
 */
static __inline void
mtx_uninit(mtx_t mtx)
{
	/* empty */
}
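
/*
 * Example lifecycle (illustrative sketch only, not compiled here; the
 * variable names are hypothetical).  A mutex is embedded in a structure
 * or declared directly, initialized once, used, and then deinitialized:
 *
 *	struct mtx my_mtx;
 *
 *	mtx_init(&my_mtx);
 *	mtx_lock(&my_mtx);		exclusive, blocks until acquired
 *	...				critical section
 *	mtx_unlock(&my_mtx);
 *	mtx_uninit(&my_mtx);
 */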

/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility into the link structure, which is required
 * when calling mtx_abort_ex_link().
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t mtx, struct mtx_link *link,
                 const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_link(mtx, link, ident, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}
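
/*
 * Illustrative sketch of the abortable-lock pattern (hypothetical names,
 * not compiled here).  The waiter makes its link structure visible so that
 * another thread can abort the pending lock request; see sys/mutex.h for
 * the exact declaration of mtx_abort_ex_link().
 *
 *	struct mtx_link my_link;
 *	int error;
 *
 *	mtx_link_init(&my_link);
 *	error = mtx_lock_ex_link(&my_mtx, &my_link, "mylock", 0, 0);
 *	if (error == 0) {
 *		...			critical section
 *		mtx_unlock(&my_mtx);
 *	}
 *
 *	Another thread holding a pointer to my_link can abort the blocked
 *	request with mtx_abort_ex_link(&my_mtx, &my_link).
 */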

/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, "mtxex", 0, 0).
 */
static __inline void
mtx_lock(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
		_mtx_lock_ex(mtx, "mtxex", 0, 0);
		return;
	}
	mtx->mtx_owner = curthread;
}

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex(mtx, ident, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}

/*
 * As above, but with no flags and no timeout.
 */
static __inline int
mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_quick(mtx, ident));
	mtx->mtx_owner = curthread;
	return(0);
}
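
/*
 * Illustrative sketch of an interruptible, bounded exclusive lock
 * (hypothetical names, not compiled here).  With PCATCH in the flags a
 * pending signal aborts the sleep and the tsleep() error is returned;
 * the timeout is expressed in ticks.
 *
 *	int error;
 *
 *	error = mtx_lock_ex(&my_mtx, "mylock", PCATCH, 5 * hz);
 *	if (error)
 *		return (error);		EINTR, ERESTART, or EWOULDBLOCK
 *	...
 *	mtx_unlock_ex(&my_mtx);
 */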

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh(mtx, ident, flags, to));
	return(0);
}

/*
 * As above, but with no flags and no timeout.
 */
static __inline int
mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_quick(mtx, ident));
	return(0);
}

/*
 * Short-form exclusive-lock a mutex, spin until acquired.  Recursion is
 * allowed.  This form is identical to mtx_spinlock_ex().
 */
static __inline void
mtx_spinlock(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock_ex(mtx);
}

/*
 * Exclusive-lock a mutex, spin until acquired.  Recursion is allowed.
 */
static __inline void
mtx_spinlock_ex(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock_ex(mtx);
}

/*
 * Share-lock a mutex, spin until acquired.  Recursion is allowed.
 */
static __inline void
mtx_spinlock_sh(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		_mtx_spinlock_sh(mtx);
}

/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;
	return (0);
}

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));
	return (0);
}
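
/*
 * Illustrative sketch of a non-blocking acquisition with a fallback
 * (hypothetical names, not compiled here):
 *
 *	if (mtx_lock_ex_try(&my_mtx) == 0) {
 *		...			got the lock without sleeping
 *		mtx_unlock(&my_mtx);
 *	} else {
 *		...			EAGAIN: defer, requeue, or block
 *	}
 */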

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t mtx)
{
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
		_mtx_downgrade(mtx);
}
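
/*
 * Illustrative sketch (hypothetical names, not compiled here): take the
 * lock exclusively to modify an object, then downgrade so other readers
 * can proceed while the caller retains a shared hold.
 *
 *	mtx_lock(&my_mtx);
 *	...				modify the protected object
 *	mtx_downgrade(&my_mtx);
 *	...				read-only access continues
 *	mtx_unlock(&my_mtx);
 */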

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = curthread;
		return(0);
	}
	return (_mtx_upgrade_try(mtx));
}
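
/*
 * Illustrative sketch (hypothetical names, not compiled here): attempt an
 * in-place upgrade and, if it fails with EDEADLK, fall back to releasing
 * the shared lock and reacquiring exclusively.  State observed under the
 * shared lock must be revalidated after the fallback path.
 *
 *	mtx_lock_sh_quick(&my_mtx, "mylock");
 *	...
 *	if (mtx_upgrade_try(&my_mtx) != 0) {
 *		mtx_unlock_sh(&my_mtx);
 *		mtx_lock(&my_mtx);
 *		...			revalidate before modifying
 *	}
 *	...				now held exclusively
 *	mtx_unlock(&my_mtx);
 */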

/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *	 both blocking and spin methods.
 *
 *	 The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *	 mutexes and produce less code, but it is ok for code to just use
 *	 mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *	 or mtx_spinlock() to lock it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_ex(mtx_t mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_sh(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}
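
/*
 * Illustrative sketch of the pairing rule above (hypothetical names,
 * not compiled here):
 *
 *	mtx_lock(&my_mtx);		short form ...
 *	mtx_unlock(&my_mtx);		... unlocks with mtx_unlock()
 *
 *	mtx_lock_sh_quick(&my_mtx, "mylock");
 *	mtx_unlock_sh(&my_mtx);		optimized shared unlock
 */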

/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t mtx)
{
	return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.
 *
 * If FALSE is returned the mutex may be in an unlocked or shared lock
 * state.
 */
static __inline int
mtx_islocked_ex(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t mtx)
{
	return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
	       mtx->mtx_owner != curthread);
}
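
/*
 * These predicates are typically used in assertions, e.g. (illustrative,
 * hypothetical mutex name):
 *
 *	KKASSERT(mtx_owned(&my_mtx));
 */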

/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *	 caller the lock count for the other owner is still returned.
 */
static __inline int
mtx_lockrefs(mtx_t mtx)
{
	return(mtx->mtx_lock & MTX_MASK);
}

/*
 * Bump the lock's ref count.  This field is independent of the lock.
 */
static __inline void
mtx_hold(mtx_t mtx)
{
	atomic_add_acq_int(&mtx->mtx_refs, 1);
}

/*
 * Drop the lock's ref count.  This field is independent of the lock.
 *
 * Returns the previous ref count, interlocked so testing against
 * 1 means you won the 1->0 transition.
 */
static __inline int
mtx_drop(mtx_t mtx)
{
	return (atomic_fetchadd_int(&mtx->mtx_refs, -1));
}
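
/*
 * Illustrative sketch of the ref count interlock (hypothetical names,
 * not compiled here): whoever observes the 1->0 transition is responsible
 * for the final teardown.
 *
 *	mtx_hold(&obj->obj_mtx);	take a reference
 *	...
 *	if (mtx_drop(&obj->obj_mtx) == 1) {
 *		...			last reference, safe to destroy
 *		mtx_uninit(&obj->obj_mtx);
 *	}
 */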

#endif