/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_SYS_MUTEX2_H_
#define	_SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t *mtx, const char *ident)
{
	mtx->mtx_lock = 0;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}
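
/*
 * Usage sketch (illustrative only, not part of this header): a mutex
 * embedded in a hypothetical object is initialized once before use.
 * The "xobj" structure, "xobj_init" function and "xobjmtx" ident are
 * examples, not existing kernel code.
 *
 *	struct xobj {
 *		mtx_t	xo_mtx;
 *		int	xo_count;
 *	};
 *
 *	static void
 *	xobj_init(struct xobj *xo)
 *	{
 *		mtx_init(&xo->xo_mtx, "xobjmtx");
 *		xo->xo_count = 0;
 *	}
 */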

/*
 * Initialize an mtx_link structure for deeper control over the mutex
 * operation.
 */
static __inline void
mtx_link_init(mtx_link_t *link)
{
	link->state = MTX_LINK_IDLE;
	link->callback = NULL;
	link->arg = NULL;
}

/*
 * A link structure initialized this way causes mutex operations to not
 * block; the caller must specify a callback.  The caller may still abort
 * the mutex operation via the link.
 */
static __inline void
mtx_link_init_async(mtx_link_t *link,
		    void (*callback)(mtx_link_t *link, void *arg, int error),
		    void *arg)
{
	link->state = MTX_LINK_IDLE;
	link->callback = callback;
	link->arg = arg;
}
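
/*
 * Usage sketch (illustrative only; "xobj_lock_done" and "xo" are
 * hypothetical): an asynchronous request pairs mtx_link_init_async()
 * with mtx_lock_ex_link().  The callback receives an error argument;
 * 0 presumably means the lock was acquired, non-zero that the request
 * failed or was aborted.  Exact return and error conventions are defined
 * by the non-inline _mtx_* code, not by this sketch.
 *
 *	static void
 *	xobj_lock_done(mtx_link_t *link, void *arg, int error)
 *	{
 *		struct xobj *xo = arg;
 *
 *		if (error == 0) {
 *			... lock held, do work ...
 *			mtx_unlock_ex(&xo->xo_mtx);
 *		}
 *	}
 *
 *	mtx_link_init_async(&xo->xo_link, xobj_lock_done, xo);
 *	mtx_lock_ex_link(&xo->xo_mtx, &xo->xo_link, 0, 0);
 */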

/*
 * Deinitialize a mutex
 */
static __inline void
mtx_uninit(mtx_t *mtx)
{
	/* empty */
}

/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in,
 * giving the caller visibility into the link structure, which is required
 * when calling mtx_abort_ex_link() or when requesting an asynchronous lock.
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_link(mtx, link, flags, to));
	mtx->mtx_owner = curthread;
	link->state = MTX_LINK_ACQUIRED;

	return(0);
}
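
/*
 * Usage sketch (illustrative only; "xo" is hypothetical): a blocking
 * lock taken through a caller-visible link, which another context can
 * use to abort the pending operation via mtx_abort_ex_link() (mentioned
 * above; it is not declared in this header).
 *
 *	mtx_link_init(&xo->xo_link);
 *	error = mtx_lock_ex_link(&xo->xo_mtx, &xo->xo_link, 0, 0);
 *	if (error == 0) {
 *		... exclusive lock held ...
 *		mtx_unlock_ex(&xo->xo_mtx);
 *	}
 */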

/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, 0, 0).
 */
static __inline void
mtx_lock(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
		_mtx_lock_ex(mtx, 0, 0);
		return;
	}
	mtx->mtx_owner = curthread;
}
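
/*
 * Usage sketch (illustrative only; "xo" is hypothetical): the short
 * forms pair naturally.
 *
 *	mtx_lock(&xo->xo_mtx);
 *	xo->xo_count++;
 *	mtx_unlock(&xo->xo_mtx);
 */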

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex(mtx, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}
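
/*
 * Usage sketch (illustrative only): an interruptible exclusive lock.
 * PCATCH comes from the comment above; the timeout argument is assumed
 * to be in ticks, as with tsleep().
 *
 *	error = mtx_lock_ex(&xo->xo_mtx, PCATCH, hz);
 *	if (error) {
 *		... interrupted or otherwise failed, lock not acquired ...
 *		return (error);
 *	}
 *	... exclusive lock held ...
 *	mtx_unlock_ex(&xo->xo_mtx);
 */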

static __inline int
mtx_lock_ex_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_quick(mtx));
	mtx->mtx_owner = curthread;
	return(0);
}

static __inline int
mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_link(mtx, link, flags, to));
	link->state = MTX_LINK_ACQUIRED;
	return(0);
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh(mtx, flags, to));
	return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_quick(mtx));
	return(0);
}
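
/*
 * Usage sketch (illustrative only): a read-mostly path takes the lock
 * shared and releases it with mtx_unlock_sh() (or mtx_unlock()).
 *
 *	mtx_lock_sh_quick(&xo->xo_mtx);
 *	value = xo->xo_count;
 *	mtx_unlock_sh(&xo->xo_mtx);
 */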

/*
 * Short-form exclusive spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;

	/*
	 * If we cannot get it trivially, get it the hard way.
	 *
	 * Note that mtx_owner will be set twice if we fail to get it
	 * trivially, but there is no point conditionalizing it, as a
	 * conditional would be slower.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock(mtx);
	mtx->mtx_owner = gd->gd_curthread;
}
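
/*
 * Usage sketch (illustrative only): spin forms bracket short critical
 * sections and must be released with mtx_spinunlock(), which also drops
 * the hard critical section set up here.
 *
 *	mtx_spinlock(&xo->xo_mtx);
 *	xo->xo_count++;		(no blocking while the spinlock is held)
 *	mtx_spinunlock(&xo->xo_mtx);
 */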

static __inline int
mtx_spinlock_try(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;

	/*
	 * If we cannot get it trivially, call _mtx_spinlock_try().  This
	 * function will clean up the hard critical section if it fails.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_spinlock_try(mtx));
	mtx->mtx_owner = gd->gd_curthread;
	return (0);
}

/*
 * Attempt to exclusive-lock a mutex without blocking, return 0 on
 * success and EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;
	return (0);
}
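
/*
 * Usage sketch (illustrative only): a non-blocking attempt falls back to
 * other work when the lock is contended.
 *
 *	if (mtx_lock_ex_try(&xo->xo_mtx) == 0) {
 *		... exclusive lock held ...
 *		mtx_unlock_ex(&xo->xo_mtx);
 *	} else {
 *		... EAGAIN, try again later ...
 *	}
 */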

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));
	return (0);
}

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t *mtx)
{
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
		_mtx_downgrade(mtx);
}
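
/*
 * Usage sketch (illustrative only): take the lock exclusively to set up
 * state, then downgrade so other readers can proceed while this thread
 * keeps a shared hold.
 *
 *	mtx_lock_ex_quick(&xo->xo_mtx);
 *	xo->xo_count = compute_initial_count();	(hypothetical helper)
 *	mtx_downgrade(&xo->xo_mtx);
 *	... continue reading xo under the shared lock ...
 *	mtx_unlock_sh(&xo->xo_mtx);
 */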

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1))
		return(0);
	return (_mtx_upgrade_try(mtx));
}
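
/*
 * Usage sketch (illustrative only): attempt to promote a shared hold
 * before modifying the object; fall back to dropping and re-acquiring
 * exclusively when other shared holders are present.
 *
 *	if (mtx_upgrade_try(&xo->xo_mtx) != 0) {
 *		mtx_unlock_sh(&xo->xo_mtx);
 *		mtx_lock_ex_quick(&xo->xo_mtx);
 *		... re-validate state, it may have changed ...
 *	}
 *	... exclusive lock held ...
 */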

/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *	 both blocking and spin methods.
 *
 *	 The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *	 mutexes and produce less code, but it is ok for code to just use
 *	 mtx_unlock().  In fact, if code uses the short-form mtx_lock()
 *	 or mtx_spinlock() to lock, it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t *mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_ex(mtx_t *mtx)
{
	u_int lock = mtx->mtx_lock;

	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_sh(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}

/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	mtx_unlock(mtx);

	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t *mtx)
{
	return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.  Returns FALSE (0) if the mutex is unlocked or
 * if it is locked shared by one or more entities.
 *
 * A caller wishing to check whether a lock is owned exclusively by it
 * should use mtx_owned().
 */
static __inline int
mtx_islocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t *mtx)
{
	return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared-locked state.
 */
static __inline int
mtx_notlocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
	       mtx->mtx_owner != curthread);
}
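
/*
 * Usage sketch (illustrative only): ownership predicates are typically
 * used in assertions; KKASSERT() is the usual DragonFly kernel assert,
 * assumed available to the caller.
 *
 *	static void
 *	xobj_modify(struct xobj *xo)
 *	{
 *		KKASSERT(mtx_owned(&xo->xo_mtx));
 *		xo->xo_count++;
 *	}
 */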

/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *	 caller, the lock count for the other owner is still returned.
 */
static __inline
int
mtx_lockrefs(mtx_t *mtx)
{
	return(mtx->mtx_lock & MTX_MASK);
}

/*
 * The lock must be held and will be released on return.  Returns state
 * which can be passed to mtx_lock_temp_restore() to return the
 * lock to its previous state.
 */
static __inline
mtx_state_t
mtx_lock_temp_release(mtx_t *mtx)
{
	mtx_state_t state;

	state = (mtx->mtx_lock & MTX_EXCLUSIVE);
	mtx_unlock(mtx);

	return state;
}

/*
 * Restore the previous state of a lock released with
 * mtx_lock_temp_release() or mtx_lock_upgrade().
 */
static __inline
void
mtx_lock_temp_restore(mtx_t *mtx, mtx_state_t state)
{
	if (state & MTX_EXCLUSIVE)
		mtx_lock_ex_quick(mtx);
	else
		mtx_lock_sh_quick(mtx);
}
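
/*
 * Usage sketch (illustrative only): temporarily drop a held lock around
 * an operation that must not be called with the mutex held, then restore
 * the original shared/exclusive state.
 *
 *	mtx_state_t state;
 *
 *	state = mtx_lock_temp_release(&xo->xo_mtx);
 *	... call something that may block or would deadlock if held ...
 *	mtx_lock_temp_restore(&xo->xo_mtx, state);
 */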

#endif