xref: /dragonfly/sys/dev/drm/linux_wwmutex.c (revision 23d2c0d6)
/*-
 * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 *    Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

#include <machine/atomic.h>

#include <linux/ww_mutex.h>

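/*
 * The functions below implement the Linux wound/wait ("ww") mutex API on
 * top of DragonFly lockmgr locks.  As a rough, hypothetical illustration of
 * the intended calling convention (obj, lock_both and my_ww_class are
 * placeholders, not names defined in this file, and a production caller
 * would typically loop instead of giving up on a second -EDEADLK):
 *
 *	int
 *	lock_both(struct obj *a, struct obj *b, struct ww_acquire_ctx *ctx)
 *	{
 *		int error;
 *
 *		ww_acquire_init(ctx, &my_ww_class);
 *		error = ww_mutex_lock(&a->ww, ctx);
 *		if (error)
 *			goto fail;
 *		error = ww_mutex_lock(&b->ww, ctx);
 *		if (error == -EDEADLK) {
 *			// older context owns b: back off, sleep on b, retry a
 *			ww_mutex_unlock(&a->ww);
 *			ww_mutex_lock_slow(&b->ww, ctx);
 *			error = ww_mutex_lock(&a->ww, ctx);
 *			if (error) {
 *				ww_mutex_unlock(&b->ww);
 *				goto fail;
 *			}
 *		} else if (error) {
 *			ww_mutex_unlock(&a->ww);
 *			goto fail;
 *		}
 *		ww_acquire_done(ctx);
 *		return 0;
 *	fail:
 *		ww_acquire_fini(ctx);
 *		return error;
 *	}
 *
 * The caller later drops both locks with ww_mutex_unlock() and then calls
 * ww_acquire_fini(ctx).
 */

/*
 * Begin a new acquire context: take a fresh stamp (ticket) from the ww
 * class so that contending contexts can be ordered by age, with no locks
 * acquired yet.
 */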
void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class)
{
	ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
	ctx->acquired = 0;
	ctx->ww_class = ww_class;
}

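/*
 * Marks the end of the acquire phase (no further locks will be taken under
 * this context).  This implementation keeps no state that cares, so it is
 * a no-op.
 */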
void
ww_acquire_done(struct ww_acquire_ctx *ctx __unused)
{
}

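/*
 * Release the acquire context once all locks taken under it have been
 * dropped.  The context owns no resources of its own here, so this too is
 * a no-op.
 */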
void
ww_acquire_fini(struct ww_acquire_ctx *ctx __unused)
{
}

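/*
 * Set up a ww_mutex on top of a lockmgr lock.  An unowned mutex carries the
 * maximum possible stamp so that no contending context ever sees itself as
 * younger than the (nonexistent) owner and backs off needlessly.
 */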
void
ww_mutex_init(struct ww_mutex *ww, struct ww_class *ww_class)
{
	lockinit(&ww->base, ww_class->name, 0, LK_CANRECURSE);
	ww->ctx = NULL;
	ww->stamp = 0xFFFFFFFFFFFFFFFFLU;
	ww->blocked = 0;
}

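/*
 * Destroy the underlying lockmgr lock.
 */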
void
ww_mutex_destroy(struct ww_mutex *ww)
{
	lockuninit(&ww->base);
}

/*
 * Optimized lock path.
 *
 * (slow) is optional as long as we block normally on the initial lock.
 * Currently not implemented.
 */
static __inline
int
__wwlock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx,
	 bool slow __unused, bool intr)
{
	int flags = LK_EXCLUSIVE;
	int error;

	if (intr)
		flags |= LK_PCATCH;

	/*
	 * Behaves as a normal mutex if ctx is NULL.
	 */
	if (ctx == NULL) {
		error = lockmgr(&ww->base, flags);
		if (error)
			error = -EINTR;
		return error;
	}

	/*
	 * A normal blocking lock can be used when ctx->acquired is 0 (no
	 * prior locks are held).  If prior locks are held then we cannot
	 * block here.
	 *
	 * In the non-blocking case, set up our tsleep interlock using
	 * ww->blocked first.
	 */
	for (;;) {
		if (ctx->acquired != 0) {
			atomic_swap_int(&ww->blocked, 1);
			flags |= LK_NOWAIT;
			tsleep_interlock(ww, (intr ? PCATCH : 0));
		}
		error = lockmgr(&ww->base, flags);
		if (error == 0) {
			ww->ctx = ctx;
			ww->stamp = ctx->stamp;
			++ctx->acquired;
			return 0;
		}

		/*
		 * EINTR or ERESTART returns -EINTR.  ENOLCK and EWOULDBLOCK
		 * cannot happen (LK_SLEEPFAIL not set, timeout is not set).
		 */
		if (error != EBUSY)
			return -EINTR;

		/*
		 * EBUSY implies LK_NOWAIT was set, so ctx->acquired must be
		 * non-zero in this path.
		 * NOTE: ww->ctx is not MPSAFE.
		 * NOTE: ww->stamp is heuristic; a race is possible.
		 */
		KKASSERT(ctx->acquired > 0);

		/*
		 * Unwind if we aren't the oldest.
		 */
		if (ctx->stamp > ww->stamp)
			return -EDEADLK;

		/*
		 * We have priority over the currently held lock.  We have
		 * already set up the interlock so we can tsleep() until the
		 * remote wakes us up (which may have already happened).
		 *
		 * error is zero if woken up
		 *	    EINTR / ERESTART - signal
		 *	    EWOULDBLOCK	     - timeout expired (if not 0)
		 */
		if (flags & LK_NOWAIT) {
			error = tsleep(ww, PINTERLOCKED | (intr ? PCATCH : 0),
				       ctx->ww_class->name, 0);
			if (intr && (error == EINTR || error == ERESTART))
				return -EINTR;
			flags &= ~LK_NOWAIT;
		}
		/* retry */
	}
}

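/*
 * Acquire ww without catching signals.  With a NULL ctx this is a plain
 * exclusive lock; with a ctx it returns 0 on success or -EDEADLK when the
 * caller already holds other locks and must back off.
 */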
int
ww_mutex_lock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 0);
}

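/*
 * Acquire ww after the caller has backed off and released its other locks.
 * Blocking is then safe, so __wwlock() treats this like a normal first
 * acquisition (the slow hint is currently unused).
 */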
int
ww_mutex_lock_slow(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 0);
}

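/*
 * As ww_mutex_lock(), but catches signals and returns -EINTR if
 * interrupted.
 */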
int
ww_mutex_lock_interruptible(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 1);
}

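/*
 * Slow-path acquisition that also catches signals.
 */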
int
ww_mutex_lock_slow_interruptible(struct ww_mutex *ww,
				 struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 1);
}

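/*
 * Release ww.  Detach the owning context and reset the stamp before
 * dropping the lockmgr lock, then wake up any waiter that armed
 * ww->blocked via its tsleep interlock.
 */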
void
ww_mutex_unlock(struct ww_mutex *ww)
{
	struct ww_acquire_ctx *ctx;

	ctx = ww->ctx;
	if (ctx) {
		KKASSERT(ctx->acquired > 0);
		--ctx->acquired;
		ww->ctx = NULL;
		ww->stamp = 0xFFFFFFFFFFFFFFFFLU;
	}
	lockmgr(&ww->base, LK_RELEASE);
	if (atomic_swap_int(&ww->blocked, 0))
		wakeup(ww);
}