/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_SYS_MUTEX2_H_
#define	_SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t mtx)
{
	mtx->mtx_lock = 0;
	mtx->mtx_refs = 0;
	mtx->mtx_owner = NULL;
	mtx->mtx_link = NULL;
}

static __inline void
mtx_link_init(mtx_link_t link)
{
	link->state = MTX_LINK_IDLE;
}

/*
 * Deinitialize a mutex
 */
static __inline void
mtx_uninit(mtx_t mtx)
{
	/* empty */
}

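/*
 * Usage sketch (illustrative, not part of the original header): a
 * consumer typically embeds a struct mtx in its own structure and
 * brackets critical sections with the short-form lock/unlock defined
 * below.  The structure and field names here are hypothetical; mtx_t
 * is assumed to be the pointer type declared in <sys/mutex.h>.
 *
 *	struct mydata {
 *		struct mtx	md_lock;
 *		int		md_count;
 *	};
 *
 *	mtx_init(&md->md_lock);
 *	...
 *	mtx_lock(&md->md_lock);
 *	++md->md_count;
 *	mtx_unlock(&md->md_lock);
 *	...
 *	mtx_uninit(&md->md_lock);
 */
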
/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility into the link structure, which is required
 * when calling mtx_abort_ex_link().
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t mtx, struct mtx_link *link,
                 const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_link(mtx, link, ident, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}

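/*
 * Usage sketch (illustrative, not part of the original header): a
 * caller that wants its blocking lock attempt to be abortable supplies
 * its own mtx_link so that another context can later call
 * mtx_abort_ex_link() on it.  "blk_mtx" and "blk_link" are
 * hypothetical identifiers.
 *
 *	struct mtx_link blk_link;
 *	int error;
 *
 *	mtx_link_init(&blk_link);
 *	error = mtx_lock_ex_link(blk_mtx, &blk_link, "blkwait", 0, 0);
 *	if (error == 0) {
 *		...
 *		mtx_unlock(blk_mtx);
 *	}
 *
 * If another context aborts the pending request via the link, the call
 * returns non-zero and the mutex is not held.
 */
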
/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, "mtxex", 0, 0).
 */
static __inline void
mtx_lock(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
		_mtx_lock_ex(mtx, "mtxex", 0, 0);
		return;
	}
	mtx->mtx_owner = curthread;
}

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex(mtx, ident, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}

static __inline int
mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_quick(mtx, ident));
	mtx->mtx_owner = curthread;
	return(0);
}

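/*
 * Usage sketch (illustrative, not part of the original header): the
 * flags and timeout are handed to tsleep(), so a caller passing PCATCH
 * must check the return value and must not assume the lock was
 * acquired on error.  "fubar_mtx" is hypothetical; hz is the usual
 * ticks-per-second global.
 *
 *	error = mtx_lock_ex(fubar_mtx, "fubwait", PCATCH, 5 * hz);
 *	if (error)
 *		return (error);
 *	...
 *	mtx_unlock(fubar_mtx);
 *
 * A non-zero return means the sleep was interrupted and the mutex was
 * not obtained, so the caller must not unlock it.
 */
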
/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh(mtx, ident, flags, to));
	return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_quick(mtx, ident));
	return(0);
}

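/*
 * Usage sketch (illustrative, not part of the original header):
 * readers take the mutex shared while writers take it exclusive; both
 * forms block.  "tab_mtx" is hypothetical.  The _quick forms pass no
 * PCATCH flag, so per the comments above they cannot fail and the
 * return value may be ignored.
 *
 *	mtx_lock_sh_quick(tab_mtx, "tabrd");
 *	... read-only access ...
 *	mtx_unlock_sh(tab_mtx);
 *
 *	mtx_lock_ex_quick(tab_mtx, "tabwr");
 *	... modify ...
 *	mtx_unlock_ex(tab_mtx);
 */
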
/*
 * Short-form exclusive spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks_wr;

	/*
	 * If we cannot get it trivially, get it the hard way.
	 *
	 * Note that mtx_owner will be set twice if we fail to get it
	 * trivially, but there's no point conditionalizing it as a
	 * conditional will be slower.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock(mtx);
	mtx->mtx_owner = gd->gd_curthread;
}

static __inline int
mtx_spinlock_try(mtx_t mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks_wr;

	/*
	 * If we cannot get it trivially, call _mtx_spinlock_try().  This
	 * function will clean up the hard critical section if it fails.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_spinlock_try(mtx));
	mtx->mtx_owner = gd->gd_curthread;
	return (0);
}

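/*
 * Usage sketch (illustrative, not part of the original header):
 * mtx_spinlock_try() enters the hard critical section up front and, as
 * noted above, _mtx_spinlock_try() backs it out again on failure, so a
 * caller only pairs it with mtx_spinunlock() when the zero (success)
 * return is seen.  A non-zero return is assumed here to mean the lock
 * was not obtained.  "isr_mtx" is hypothetical.
 *
 *	if (mtx_spinlock_try(isr_mtx) == 0) {
 *		... short, non-blocking critical section ...
 *		mtx_spinunlock(isr_mtx);
 *	}
 */
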
/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;
	return (0);
}

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));
	return (0);
}

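/*
 * Usage sketch (illustrative, not part of the original header): the
 * try forms never block, returning 0 when the lock was obtained and
 * EAGAIN when it was not.  "cache_mtx" is hypothetical.
 *
 *	if (mtx_lock_ex_try(cache_mtx) == 0) {
 *		... got the exclusive lock without sleeping ...
 *		mtx_unlock_ex(cache_mtx);
 *	} else {
 *		... defer the work or fall back to a blocking lock ...
 *	}
 */
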
/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t mtx)
{
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 0) == 0)
		_mtx_downgrade(mtx);
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1))
		return(0);
	return (_mtx_upgrade_try(mtx));
}

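/*
 * Usage sketch (illustrative, not part of the original header): a
 * lookup path might start shared, attempt an in-place upgrade before
 * modifying, and fall back to relocking exclusively when the upgrade
 * fails.  "tree_mtx" and "need_modify" are hypothetical.
 *
 *	mtx_lock_sh_quick(tree_mtx, "treerd");
 *	if (need_modify && mtx_upgrade_try(tree_mtx) != 0) {
 *		mtx_unlock_sh(tree_mtx);
 *		mtx_lock_ex_quick(tree_mtx, "treewr");
 *	}
 *	...
 *	mtx_unlock(tree_mtx);
 *
 * Going the other way, mtx_downgrade() converts an exclusive hold into
 * a shared hold without ever releasing the lock.
 */
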
/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *	 both blocking and spin methods.
 *
 *	 The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *	 mutexes and produce less code, but it is ok for code to just use
 *	 mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *	 or mtx_spinlock() to lock it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_ex(mtx_t mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_sh(mtx_t mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}

/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t mtx)
{
	globaldata_t gd = mycpu;

	mtx_unlock(mtx);

	--gd->gd_spinlocks_wr;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

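/*
 * Usage sketch (illustrative, not part of the original header):
 * mtx_spinlock() raises td_critcount and gd_spinlocks_wr before
 * acquiring the lock and mtx_spinunlock() drops them again after
 * releasing it, so the two must always be paired and the critical
 * section must stay short and must not block.  "intr_mtx" is
 * hypothetical.
 *
 *	mtx_spinlock(intr_mtx);
 *	... touch shared state, no blocking ...
 *	mtx_spinunlock(intr_mtx);
 */
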
/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t mtx)
{
	return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.
 *
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_islocked_ex(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t mtx)
{
	return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
	       mtx->mtx_owner != curthread);
}

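/*
 * Usage sketch (illustrative, not part of the original header): the
 * predicates above are primarily useful for assertions, e.g. verifying
 * that a caller already holds the exclusive lock.  KKASSERT() is the
 * standard DragonFly kernel assertion macro; "obj_mtx" is
 * hypothetical.
 *
 *	static void
 *	obj_modify_locked(mtx_t obj_mtx)
 *	{
 *		KKASSERT(mtx_owned(obj_mtx));
 *		...
 *	}
 */
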
/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *	 caller the lock count for the other owner is still returned.
 */
static __inline int
mtx_lockrefs(mtx_t mtx)
{
	return(mtx->mtx_lock & MTX_MASK);
}

/*
 * Bump the lock's ref count.  This field is independent of the lock.
 */
static __inline void
mtx_hold(mtx_t mtx)
{
	atomic_add_acq_int(&mtx->mtx_refs, 1);
}

/*
 * Drop the lock's ref count.  This field is independent of the lock.
 *
 * Returns the previous ref count, interlocked so testing against
 * 1 means you won the 1->0 transition.
 */
static __inline int
mtx_drop(mtx_t mtx)
{
	return (atomic_fetchadd_int(&mtx->mtx_refs, -1));
}

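/*
 * Usage sketch (illustrative, not part of the original header):
 * because mtx_drop() returns the previous count, comparing the result
 * against 1 detects the final 1->0 transition exactly once, which is
 * the natural point to free an object whose mutex carries the ref
 * count.  "obj", "o_mtx" and obj_free() are hypothetical.
 *
 *	mtx_hold(&obj->o_mtx);
 *	...
 *	if (mtx_drop(&obj->o_mtx) == 1)
 *		obj_free(obj);
 */
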
#endif