/*
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_WW_MUTEX_H_
#define _LINUX_WW_MUTEX_H_

#include <linux/mutex.h>

/*
 * A basic, unoptimized implementation of wound/wait mutexes for DragonFly,
 * modelled after the Linux API [1].
 *
 * [1]: http://lxr.free-electrons.com/source/include/linux/ww_mutex.h
 */

#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

struct ww_class {
	volatile u_long			stamp;
	const char			*name;
};

struct ww_acquire_ctx {
	u_long				stamp;
	struct ww_class			*ww_class;
};

struct ww_mutex {
	struct spinlock			lock;
	volatile int			acquired;
	volatile struct ww_acquire_ctx	*ctx;
	volatile struct thread		*owner;
};

#define DEFINE_WW_CLASS(classname)	\
	struct ww_class classname = {	\
		.stamp = 0,		\
		.name = #classname	\
	}

static inline void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) {
	ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
	ctx->ww_class = ww_class;
}

static inline void
ww_acquire_done(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_acquire_fini(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) {
	spin_init(&lock->lock, ww_class->name);
	lock->acquired = 0;
	lock->ctx = NULL;
	lock->owner = NULL;
}
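
/*
 * Typical setup, as a minimal sketch.  The names below ("my_ww_class",
 * "struct my_obj", the "resv" member) are hypothetical and are not part
 * of this header; they only illustrate how DEFINE_WW_CLASS() and
 * ww_mutex_init() fit together:
 *
 *	DEFINE_WW_CLASS(my_ww_class);
 *
 *	struct my_obj {
 *		struct ww_mutex	resv;
 *	};
 *
 *	static void
 *	my_obj_setup(struct my_obj *obj)
 *	{
 *		ww_mutex_init(&obj->resv, &my_ww_class);
 *	}
 */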

static inline bool
ww_mutex_is_locked(struct ww_mutex *lock) {
	bool res = false;
	spin_lock(&lock->lock);
	if (lock->acquired > 0)
		res = true;
	spin_unlock(&lock->lock);
	return res;
}

/*
 * Return 1 if the lock could be acquired, else 0 (contended).
 */
static inline int
ww_mutex_trylock(struct ww_mutex *lock) {
	int res = 1;
	KKASSERT(curthread);

	spin_lock(&lock->lock);
	/*
	 * In case no one holds the ww_mutex yet, we acquire it.
	 */
	if (lock->acquired == 0) {
		KKASSERT(lock->ctx == NULL);
		lock->acquired += 1;
		lock->owner = curthread;
	}
	/*
	 * In case we already hold the ww_mutex, increase the count.
	 */
	else if (lock->owner == curthread) {
		lock->acquired += 1;
	}
	else {
		res = 0;
	}
	spin_unlock(&lock->lock);
	return res;
}
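
/*
 * A minimal trylock sketch, assuming the hypothetical "struct my_obj" from
 * the setup sketch above and a hypothetical my_obj_update() helper (neither
 * is part of this header).  It only illustrates pairing ww_mutex_trylock()
 * with ww_mutex_unlock() and falling back when the lock is contended:
 *
 *	static int
 *	my_obj_try_update(struct my_obj *obj)
 *	{
 *		if (!ww_mutex_trylock(&obj->resv))
 *			return -EBUSY;
 *		my_obj_update(obj);
 *		ww_mutex_unlock(&obj->resv);
 *		return 0;
 *	}
 */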

/*
 * When `slow` is `true`, it will always block if the ww_mutex is contended.
 * It is assumed that the caller will not hold any (ww_mutex) resources when
 * calling the slow path, as this could lead to deadlocks.
 *
 * When `intr` is `true`, the ssleep will be interruptible.
 */
static inline int
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx, bool slow, bool intr) {
	int err;

	KKASSERT(curthread);

	spin_lock(&lock->lock);
	for (;;) {
		/*
		 * In case no one holds the ww_mutex yet, we acquire it.
		 */
		if (lock->acquired == 0) {
			KKASSERT(lock->ctx == NULL);
			lock->acquired += 1;
			lock->ctx = ctx;
			lock->owner = curthread;
			err = 0;
			break;
		}
		/*
		 * In case we already hold the ww_mutex, simply increase
		 * the count and return -EALREADY.
		 */
		else if (lock->owner == curthread) {
			lock->acquired += 1;
			err = -EALREADY;
			break;
		}
		/*
		 * This is the contention case where the ww_mutex is
		 * already held by another context.
		 */
		else {
			/*
			 * Three cases:
			 *
			 * - We are in the slow path (first lock to obtain).
			 *
			 * - No context was specified. We assume a single
			 *   resource, so there is no danger of a deadlock.
			 *
			 * - An `older` process (`ctx`) tries to acquire a
			 *   lock already held by a `younger` process.
			 *   We put the `older` process to sleep until
			 *   the `younger` process gives up all of its
			 *   resources.
			 */
			if (slow || ctx == NULL || ctx->stamp < lock->ctx->stamp) {
				int s = ssleep(lock, &lock->lock,
					       intr ? PCATCH : 0,
					       ctx ? ctx->ww_class->name : "ww_mutex_lock", 0);
				if (intr && (s == EINTR || s == ERESTART)) {
					/* XXX: Should we handle ERESTART? */
					err = -EINTR;
					break;
				}
			}
			/*
			 * If a `younger` process tries to acquire a lock
			 * already held by an `older` process, we `wound` it,
			 * i.e. we return -EDEADLK because there is a potential
			 * risk of a deadlock. The `younger` process should
			 * then give up all of its resources and try again to
			 * acquire the lock in question, this time in a
			 * blocking manner.
			 */
			else {
				err = -EDEADLK;
				break;
			}
		}

	} /* for */
	spin_unlock(&lock->lock);
	return err;
}

static inline int
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, false);
}

static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	(void)__ww_mutex_lock(lock, ctx, true, false);
}

static inline int
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, true);
}

static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, true, true);
}

static inline void
ww_mutex_unlock(struct ww_mutex *lock) {
	spin_lock(&lock->lock);
	KKASSERT(lock->owner == curthread);
	KKASSERT(lock->acquired > 0);

	--lock->acquired;
	if (lock->acquired > 0) {
		spin_unlock(&lock->lock);
		return;
	}

	KKASSERT(lock->acquired == 0);
	lock->ctx = NULL;
	lock->owner = NULL;
	spin_unlock(&lock->lock);
	wakeup(lock);
}
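
/*
 * A sketch of the wound/wait acquire/backoff dance for two objects, again
 * using the hypothetical "my_ww_class", "struct my_obj" (with its embedded
 * ww_mutex "resv") and a hypothetical my_obj_update_pair() helper; none of
 * these are part of this header.  The first lock is taken via the slow path
 * since nothing is held yet.  A -EDEADLK on the second lock means we are the
 * younger context: we drop what we hold, block on the contended lock via the
 * slow path, and retry with the roles swapped.
 *
 *	static void
 *	my_obj_lock_pair(struct my_obj *a, struct my_obj *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		struct my_obj *tmp;
 *
 *		ww_acquire_init(&ctx, &my_ww_class);
 *		ww_mutex_lock_slow(&a->resv, &ctx);
 *		while (ww_mutex_lock(&b->resv, &ctx) == -EDEADLK) {
 *			ww_mutex_unlock(&a->resv);
 *			ww_mutex_lock_slow(&b->resv, &ctx);
 *			tmp = a; a = b; b = tmp;
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		my_obj_update_pair(a, b);
 *
 *		ww_mutex_unlock(&a->resv);
 *		ww_mutex_unlock(&b->resv);
 *		ww_acquire_fini(&ctx);
 *	}
 */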

static inline void
ww_mutex_destroy(struct ww_mutex *lock) {
	KKASSERT(lock->acquired == 0);
	KKASSERT(lock->ctx == NULL);
	KKASSERT(lock->owner == NULL);
	spin_uninit(&lock->lock);
}

#endif	/* _LINUX_WW_MUTEX_H_ */