/* xref: /dragonfly/sys/dev/drm/linux_wwmutex.c (revision 23d2c0d6) */
/*-
 * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 *    Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

#include <machine/atomic.h>

#include <linux/ww_mutex.h>

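/*
 * Begin a lock acquisition sequence.  Each context draws a stamp from the
 * class-wide counter; a smaller (older) stamp wins contested locks below.
 */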
void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class)
{
	ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
	ctx->acquired = 0;
	ctx->ww_class = ww_class;
}

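/*
 * ww_acquire_done() and ww_acquire_fini() are no-ops in this
 * implementation; the acquire context needs no further setup or teardown.
 */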
void
ww_acquire_done(struct ww_acquire_ctx *ctx __unused)
{
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx __unused)
{
}

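/*
 * Initialize a ww_mutex on top of a recursable lockmgr lock.  The stamp
 * starts at the maximum value so the unwind check in __wwlock() never
 * fires against an unowned mutex, and ww->blocked tracks whether a
 * contender has armed the tsleep interlock.
 */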
void
ww_mutex_init(struct ww_mutex *ww, struct ww_class *ww_class)
{
	lockinit(&ww->base, ww_class->name, 0, LK_CANRECURSE);
	ww->ctx = NULL;
	ww->stamp = 0xFFFFFFFFFFFFFFFFLU;
	ww->blocked = 0;
}

void
ww_mutex_destroy(struct ww_mutex *ww)
{
	lockuninit(&ww->base);
}

/*
 * Optimized lock path.
 *
 * A separate (slow) path is optional as long as we block normally on the
 * initial lock attempt; it is currently not implemented, so the argument
 * is ignored.
 */
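/*
 * With no prior locks held (ctx->acquired == 0) we simply block on the
 * lockmgr lock.  With prior locks held we must not block indefinitely:
 * the lock is attempted with LK_NOWAIT and a tsleep interlock armed.  On
 * contention the younger context (larger stamp) backs off with -EDEADLK,
 * while an older context sleeps until the holder's unlock wakes it, then
 * retries.
 */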
static __inline
int
__wwlock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx,
	 bool slow __unused, bool intr)
{
	int flags = LK_EXCLUSIVE;
	int error;

	if (intr)
		flags |= LK_PCATCH;

	/*
	 * Behave like a normal mutex when no acquire context is supplied.
	 */
	if (ctx == NULL) {
		error = lockmgr(&ww->base, flags);
		if (error)
			error = -EINTR;
		return error;
	}

	/*
	 * A normal blocking lock can be used when ctx->acquired is 0 (no
	 * prior locks are held).  If prior locks are held then we cannot
	 * block here.
	 *
	 * In the non-blocking case, set up our tsleep interlock using
	 * ww->blocked first.
	 */
	for (;;) {
		if (ctx->acquired != 0) {
			atomic_swap_int(&ww->blocked, 1);
			flags |= LK_NOWAIT;
			tsleep_interlock(ww, (intr ? PCATCH : 0));
		}
		error = lockmgr(&ww->base, flags);
		if (error == 0) {
			ww->ctx = ctx;
			ww->stamp = ctx->stamp;
			++ctx->acquired;
			return 0;
		}

		/*
		 * EINTR or ERESTART returns -EINTR.  ENOLCK and EWOULDBLOCK
		 * cannot happen (LK_SLEEPFAIL not set, timeout is not set).
		 */
		if (error != EBUSY)
			return -EINTR;

		/*
		 * We can only get here with ctx->acquired non-zero.
		 * NOTE: ww->ctx is not MPSAFE.
		 * NOTE: ww->stamp is a heuristic; a race is possible.
		 */
		KKASSERT(ctx->acquired > 0);

		/*
		 * Unwind if we aren't the oldest.
		 */
		if (ctx->stamp > ww->stamp)
			return -EDEADLK;

		/*
		 * We have priority over the currently held lock.  We have
		 * already set up the interlock so we can tsleep() until the
		 * remote wakes us up (which may have already happened).
		 *
		 * error is zero if woken up
		 *	    EINTR / ERESTART - signal
		 *	    EWOULDBLOCK	     - timeout expired (if not 0)
		 */
		if (flags & LK_NOWAIT) {
			error = tsleep(ww, PINTERLOCKED | (intr ? PCATCH : 0),
				       ctx->ww_class->name, 0);
			if (intr && (error == EINTR || error == ERESTART))
				return -EINTR;
			flags &= ~LK_NOWAIT;
		}
		/* retry */
	}
}

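/*
 * Public lock entry points.  All four funnel into __wwlock(); the _slow
 * and _interruptible variants differ only in the (slow, intr) arguments
 * passed down.
 */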
int
ww_mutex_lock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 0);
}

int
ww_mutex_lock_slow(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 0);
}

int
ww_mutex_lock_interruptible(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 1);
}

int
ww_mutex_lock_slow_interruptible(struct ww_mutex *ww,
				 struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 1);
}

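/*
 * Release the mutex.  Detach the owning context and reset the stamp
 * before dropping the lockmgr lock, then wake up any contender that
 * armed the ww->blocked interlock.
 */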
void
ww_mutex_unlock(struct ww_mutex *ww)
{
	struct ww_acquire_ctx *ctx;

	ctx = ww->ctx;
	if (ctx) {
		KKASSERT(ctx->acquired > 0);
		--ctx->acquired;
		ww->ctx = NULL;
		ww->stamp = 0xFFFFFFFFFFFFFFFFLU;
	}
	lockmgr(&ww->base, LK_RELEASE);
	if (atomic_swap_int(&ww->blocked, 0))
		wakeup(ww);
}
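
/*
 * Illustrative usage sketch (not compiled in): how a caller might combine
 * the acquire context with the lock/unlock entry points above, including
 * backing off on -EDEADLK and re-taking the contended mutex first via the
 * blocking slow path.  The function name example_lock_pair() and the idea
 * of passing the ww_class in as a parameter are made up for this sketch
 * and are not part of the driver API.
 */
#if 0
static int
example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
		  struct ww_class *wwc)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *first = a;
	struct ww_mutex *second = b;
	int error;

	ww_acquire_init(&ctx, wwc);
	error = ww_mutex_lock(first, &ctx);
	if (error)
		goto out_fini;

	while ((error = ww_mutex_lock(second, &ctx)) == -EDEADLK) {
		/*
		 * An older context owns 'second'.  Back off completely,
		 * wait for 'second' to become free, then retry with the
		 * lock order swapped so the contended mutex is taken first.
		 */
		ww_mutex_unlock(first);
		error = ww_mutex_lock_slow(second, &ctx);
		if (error)
			goto out_fini;
		first = second;
		second = (first == a) ? b : a;
	}
	if (error) {
		ww_mutex_unlock(first);
		goto out_fini;
	}

	ww_acquire_done(&ctx);
	/* ... both mutexes are held here; do the real work ... */
	ww_mutex_unlock(second);
	ww_mutex_unlock(first);
out_fini:
	ww_acquire_fini(&ctx);
	return error;
}
#endif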