xref: /dragonfly/sys/dev/drm/include/linux/ww_mutex.h (revision d9f85b33)
/*
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_WW_MUTEX_H_
#define _LINUX_WW_MUTEX_H_

/*
 * A basic, unoptimized implementation of wound/wait mutexes for DragonFly
 * modelled after the Linux API [1].
 *
 * [1]: http://lxr.free-electrons.com/source/include/linux/ww_mutex.h
 */
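
/*
 * Typical usage, as a rough sketch (the `my_class` class, the `obj`
 * structure and its embedded `lock` member below are hypothetical and
 * not part of this header):
 *
 *	DEFINE_WW_CLASS(my_class);
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	ret = ww_mutex_lock(&obj->lock, &ctx);
 *	if (ret == 0 || ret == -EALREADY) {
 *		... obj->lock is now held by this context ...
 *	}
 *	ww_acquire_done(&ctx);
 *	... use the resource ...
 *	ww_mutex_unlock(&obj->lock);
 *	ww_acquire_fini(&ctx);
 *
 * A return value of -EDEADLK from ww_mutex_lock() means the caller must
 * back off; see the sketch following the locking wrappers further below.
 */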

#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

struct ww_class {
	volatile u_long			stamp;
	const char			*name;
};

struct ww_acquire_ctx {
	u_long				stamp;
	struct ww_class			*ww_class;
};

struct ww_mutex {
	struct spinlock			lock;
	volatile int			acquired;
	volatile struct ww_acquire_ctx	*ctx;
	volatile struct thread		*owner;
};

#define DEFINE_WW_CLASS(classname)	\
	struct ww_class classname = {	\
		.stamp = 0,		\
		.name = #classname	\
	}
static inline void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) {
	ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
	ctx->ww_class = ww_class;
}

static inline void
ww_acquire_done(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_acquire_fini(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) {
	spin_init(&lock->lock, ww_class->name);
	lock->acquired = 0;
	lock->ctx = NULL;
	lock->owner = NULL;
}

static inline bool
ww_mutex_is_locked(struct ww_mutex *lock) {
	bool res = false;
	spin_lock(&lock->lock);
	if (lock->acquired > 0) res = true;
	spin_unlock(&lock->lock);
	return res;
}

/*
 * Return 1 if the lock could be acquired, else 0 (contended).
 */
static inline int
ww_mutex_trylock(struct ww_mutex *lock) {
	int res = 1;
	KKASSERT(curthread);

	spin_lock(&lock->lock);
	/*
	 * In case no one holds the ww_mutex yet, we acquire it.
	 */
	if (lock->acquired == 0) {
		KKASSERT(lock->ctx == NULL);
		lock->acquired += 1;
		lock->owner = curthread;
	}
	/*
	 * In case we already hold the ww_mutex, increase the
	 * acquisition count.
	 */
	else if (lock->owner == curthread) {
		lock->acquired += 1;
	}
	else {
		res = 0;
	}
	spin_unlock(&lock->lock);
	return res;
}
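
/*
 * Example (a rough sketch; `obj` is a hypothetical object embedding a
 * ww_mutex named `lock`):
 *
 *	if (ww_mutex_trylock(&obj->lock)) {
 *		... obj->lock is held, use the resource ...
 *		ww_mutex_unlock(&obj->lock);
 *	} else {
 *		... contended, fall back to ww_mutex_lock() or skip ...
 *	}
 */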

/*
 * When `slow` is `true`, this function will always block if the ww_mutex is
 * contended.  It is assumed that the caller does not hold any (ww_mutex)
 * resources when taking the slow path, as holding them could lead to
 * deadlocks.
 *
 * When `intr` is `true`, the ssleep will be interruptible.
 *
 * A caller that receives -EDEADLK must release the ww_mutexes it already
 * holds and retry; see the usage sketch after the locking wrappers below.
 */
static inline int
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx, bool slow, bool intr) {
	int err;

	KKASSERT(curthread);

	spin_lock(&lock->lock);
	for (;;) {
		/*
		 * In case no one holds the ww_mutex yet, we acquire it.
		 */
		if (lock->acquired == 0) {
			KKASSERT(lock->ctx == NULL);
			lock->acquired += 1;
			lock->ctx = ctx;
			lock->owner = curthread;
			err = 0;
			break;
		}
		/*
		 * In case we already hold the ww_mutex, simply increase
		 * the count and return -EALREADY.
		 */
		else if (lock->owner == curthread) {
			lock->acquired += 1;
			err = -EALREADY;
			break;
		}
		/*
		 * This is the contention case where the ww_mutex is
		 * already held by another context.
		 */
		else {
			/*
			 * Three cases:
			 *
			 * - We are in the slow path (first lock to obtain).
			 *
			 * - No context was specified.  We assume a single
			 *   resource, so there is no danger of a deadlock.
			 *
			 * - An `older` context (`ctx`) tries to acquire a
			 *   lock already held by a `younger` context.
			 *   We put the `older` context to sleep until
			 *   the `younger` context gives up all of its
			 *   resources.
			 */
			if (slow || ctx == NULL || ctx->stamp < lock->ctx->stamp) {
				int s = ssleep(lock, &lock->lock,
					       intr ? PCATCH : 0,
					       ctx ? ctx->ww_class->name : "ww_mutex_lock", 0);
				if (intr && (s == EINTR || s == ERESTART)) {
					/* XXX: Should we handle ERESTART? */
					err = -EINTR;
					break;
				}
			}
			/*
			 * If a `younger` context tries to acquire a lock
			 * already held by an `older` context, we `wound` it,
			 * i.e. we return -EDEADLK because there is a potential
			 * risk of deadlock.  The `younger` context should then
			 * give up all of its resources and try again to
			 * acquire the lock in question, this time in a
			 * blocking manner.
			 */
			else {
				err = -EDEADLK;
				break;
			}
		}

	} /* for */
	spin_unlock(&lock->lock);
	return err;
}

static inline int
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, false);
}

static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	(void)__ww_mutex_lock(lock, ctx, true, false);
}

static inline int
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, true);
}

static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, true, true);
}
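
/*
 * Deadlock back-off, as a rough sketch (the `a` and `b` objects and the
 * `my_class` class are hypothetical, as in the sketch near the top of
 * this file; both objects embed a ww_mutex named `lock`):
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	ret = ww_mutex_lock(&a->lock, &ctx);
 *	ret = ww_mutex_lock(&b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		... wounded: release everything we hold, then block on
 *		    the contended lock via the slow path and retry ...
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		ret = ww_mutex_lock(&a->lock, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 *	... use the resources ...
 *	ww_mutex_unlock(&a->lock);
 *	ww_mutex_unlock(&b->lock);
 *	ww_acquire_fini(&ctx);
 */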

static inline void
ww_mutex_unlock(struct ww_mutex *lock) {
	spin_lock(&lock->lock);
	KKASSERT(lock->owner == curthread);
	KKASSERT(lock->acquired > 0);

	--lock->acquired;
	if (lock->acquired > 0) {
		spin_unlock(&lock->lock);
		return;
	}

	KKASSERT(lock->acquired == 0);
	lock->ctx = NULL;
	lock->owner = NULL;
	spin_unlock(&lock->lock);
	wakeup(lock);
}

static inline void
ww_mutex_destroy(struct ww_mutex *lock) {
	KKASSERT(lock->acquired == 0);
	KKASSERT(lock->ctx == NULL);
	KKASSERT(lock->owner == NULL);
	spin_uninit(&lock->lock);
}

#endif	/* _LINUX_WW_MUTEX_H_ */