1 /*
2 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #ifndef _LINUX_WW_MUTEX_H_
28 #define _LINUX_WW_MUTEX_H_
29
30 /*
31 * A basic, unoptimized implementation of wound/wait mutexes for DragonFly
32 * modelled after the Linux API [1].
33 *
34 * [1]: http://lxr.free-electrons.com/source/include/linux/ww_mutex.h
35 */
36
37 #include <sys/types.h>
38 #include <sys/systm.h>
39 #include <sys/param.h>
40 #include <sys/mutex.h>
41 #include <machine/intr.h>
42 #include <linux/mutex.h>
43
/*
 * A ww_class groups ww_mutexes that may be acquired together under one
 * global acquisition ordering.
 */
struct ww_class {
	volatile u_long stamp;	/* monotonically increasing ticket source for contexts */
	const char *name;	/* class name; used as the sleep wait-channel message */
};
48
/*
 * Per-acquisition-sequence context.  The stamp decides lock ordering:
 * a lower stamp means an "older" context, which wins contention.
 */
struct ww_acquire_ctx {
	u_long stamp;			/* ticket taken from the class at init time */
	struct ww_class *ww_class;	/* class this context was initialized against */
};
53
/*
 * The wound/wait mutex itself.  All fields below `base` are protected
 * by `base`.
 */
struct ww_mutex {
	struct mutex base;		/* spin mutex guarding the fields below */
	volatile int acquired;		/* 0 = free, 1 = held */
	struct ww_acquire_ctx *ctx;	/* holder's acquire context, or NULL */
	volatile struct proc *owner;	/* holding process, for -EALREADY detection */
};
60
/*
 * Define a wound/wait class: the shared stamp counter that orders all
 * ww_acquire_ctx instances created against it.
 */
#define DEFINE_WW_CLASS(classname) \
	struct ww_class classname = { \
		.stamp = 0, \
		.name = #classname \
	}
66
/*
 * Define a wait-die class.  This implementation does not distinguish
 * wait-die from wound-wait, so this expands identically to
 * DEFINE_WW_CLASS; it exists for Linux API compatibility.
 */
#define DEFINE_WD_CLASS(classname) \
	struct ww_class classname = { \
		.stamp = 0, \
		.name = #classname \
	}
72
73 static inline void
ww_acquire_init(struct ww_acquire_ctx * ctx,struct ww_class * ww_class)74 ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) {
75 ctx->stamp = __sync_fetch_and_add(&ww_class->stamp, 1);
76 ctx->ww_class = ww_class;
77 }
78
/*
 * Mark the end of the acquire phase for `ctx`.  No-op in this
 * implementation; present for Linux API compatibility.
 */
static inline void
ww_acquire_done(__unused struct ww_acquire_ctx *ctx) {
}
82
/*
 * Release an acquire context.  No-op in this implementation; present
 * for Linux API compatibility.
 */
static inline void
ww_acquire_fini(__unused struct ww_acquire_ctx *ctx) {
}
86
87 static inline void
ww_mutex_init(struct ww_mutex * lock,struct ww_class * ww_class)88 ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) {
89 mtx_init(&lock->base, IPL_NONE);
90 lock->acquired = 0;
91 lock->ctx = NULL;
92 lock->owner = NULL;
93 }
94
95 static inline bool
ww_mutex_is_locked(struct ww_mutex * lock)96 ww_mutex_is_locked(struct ww_mutex *lock) {
97 bool res = false;
98 mtx_enter(&lock->base);
99 if (lock->acquired > 0) res = true;
100 mtx_leave(&lock->base);
101 return res;
102 }
103
104 /*
105 * Return 1 if lock could be acquired, else 0 (contended).
106 */
107 static inline int
ww_mutex_trylock(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)108 ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
109 int res = 0;
110
111 mtx_enter(&lock->base);
112 /*
113 * In case no one holds the ww_mutex yet, we acquire it.
114 */
115 if (lock->acquired == 0) {
116 KASSERT(lock->ctx == NULL);
117 lock->acquired = 1;
118 lock->owner = curproc;
119 res = 1;
120 }
121 mtx_leave(&lock->base);
122 return res;
123 }
124
/*
 * Common blocking-acquire path for all ww_mutex_lock* variants.
 *
 * When `slow` is `true`, it will always block if the ww_mutex is contended.
 * It is assumed that the caller will not hold any (ww_mutex) resources when
 * calling the slow path as this could lead to deadlocks.
 *
 * When `intr` is `true`, the msleep_nsec will be interruptible.
 *
 * Returns 0 on success, -EALREADY if curproc already holds the lock,
 * -EDEADLK if the caller must back off (release all its ww_mutexes and
 * retry via the slow path), or -EINTR if an interruptible sleep was
 * interrupted.
 */
static inline int
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx, bool slow, bool intr) {
	int err;

	mtx_enter(&lock->base);
	for (;;) {
		/*
		 * In case no one holds the ww_mutex yet, we acquire it.
		 */
		if (lock->acquired == 0) {
			KASSERT(lock->ctx == NULL);
			lock->acquired = 1;
			lock->ctx = ctx;
			lock->owner = curproc;
			err = 0;
			break;
		}
		/*
		 * In case we already hold the lock, return -EALREADY.
		 */
		else if (lock->owner == curproc) {
			err = -EALREADY;
			break;
		}
		/*
		 * This is the contention case where the ww_mutex is
		 * already held by another context.
		 */
		else {
			/*
			 * Three cases:
			 *
			 * - We are in the slow-path (first lock to obtain).
			 *
			 * - No context was specified. We assume a single
			 *   resource, so there is no danger of a deadlock.
			 *
			 * - An `older` process (`ctx`) tries to acquire a
			 *   lock already held by a `younger` process.
			 *   We put the `older` process to sleep until
			 *   the `younger` process gives up all it's
			 *   resources.
			 */
			if (slow || ctx == NULL ||
			    (lock->ctx && ctx->stamp < lock->ctx->stamp)) {
				/* Sleeping while cold (early boot) would hang the system. */
				KASSERT(!cold);
				/* Sleep until the holder's unlock wakeup; then re-check the loop. */
				int s = msleep_nsec(lock, &lock->base,
				    intr ? PCATCH : 0,
				    ctx ? ctx->ww_class->name : "ww_mutex_lock",
				    INFSLP);
				if (intr && (s == EINTR || s == ERESTART)) {
					// XXX: Should we handle ERESTART?
					err = -EINTR;
					break;
				}
			}
			/*
			 * If a `younger` process tries to acquire a lock
			 * already held by an `older` process, we `wound` it,
			 * i.e. we return -EDEADLK because there is a potential
			 * risk for a deadlock. The `younger` process then
			 * should give up all it's resources and try again to
			 * acquire the lock in question, this time in a
			 * blocking manner.
			 */
			else {
				err = -EDEADLK;
				break;
			}
		}

	} /* for */
	mtx_leave(&lock->base);
	return err;
}
207
208 static inline int
ww_mutex_lock(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)209 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
210 return __ww_mutex_lock(lock, ctx, false, false);
211 }
212
213 static inline void
ww_mutex_lock_slow(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)214 ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
215 (void)__ww_mutex_lock(lock, ctx, true, false);
216 }
217
218 static inline int
ww_mutex_lock_interruptible(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)219 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
220 return __ww_mutex_lock(lock, ctx, false, true);
221 }
222
223 static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex * lock,struct ww_acquire_ctx * ctx)224 ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
225 return __ww_mutex_lock(lock, ctx, true, true);
226 }
227
/*
 * Release the ww_mutex.  Must be called by the current owner while the
 * lock is held exactly once.  Sleepers in __ww_mutex_lock() are woken
 * so they can retry their acquisition loop.
 */
static inline void
ww_mutex_unlock(struct ww_mutex *lock) {
	mtx_enter(&lock->base);
	KASSERT(lock->owner == curproc);
	KASSERT(lock->acquired == 1);

	lock->acquired = 0;
	lock->ctx = NULL;
	lock->owner = NULL;
	mtx_leave(&lock->base);
	/* Wake waiters only after dropping the spin mutex they sleep against. */
	wakeup(lock);
}
240
/*
 * Tear down a ww_mutex.  The lock must be completely free (not held,
 * no context, no owner); there is nothing else to release in this
 * implementation.
 */
static inline void
ww_mutex_destroy(struct ww_mutex *lock) {
	KASSERT(lock->acquired == 0);
	KASSERT(lock->ctx == NULL);
	KASSERT(lock->owner == NULL);
}
247
248 #endif /* _LINUX_WW_MUTEX_H_ */
249