/*
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_WW_MUTEX_H_
#define _LINUX_WW_MUTEX_H_

/*
 * A basic, unoptimized implementation of wound/wait mutexes, originally
 * written for DragonFly and modelled after the Linux API [1].
 *
 * [1]: http://lxr.free-electrons.com/source/include/linux/ww_mutex.h
 */
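
/*
 * Typical single-lock usage, as a minimal, hypothetical sketch
 * (`my_class`, `my_lock` and `ctx` are illustrative names, not part of
 * this header):
 *
 *	DEFINE_WW_CLASS(my_class);
 *	struct ww_mutex my_lock;
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_mutex_init(&my_lock, &my_class);
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	if (ww_mutex_lock(&my_lock, &ctx) == -EDEADLK)
 *		ww_mutex_lock_slow(&my_lock, &ctx);
 *	ww_acquire_done(&ctx);
 *	... critical section ...
 *	ww_mutex_unlock(&my_lock);
 *	ww_acquire_fini(&ctx);
 */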

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/mutex.h>
#include <machine/intr.h>
#include <linux/compiler.h>
#include <linux/mutex.h>

struct ww_class {
	volatile u_long			stamp;		/* ticket source for contexts */
	const char			*name;
};

struct ww_acquire_ctx {
	u_long				stamp;		/* lower stamp == older context */
	struct ww_class			*ww_class;
};

struct ww_mutex {
	struct mutex			base;		/* protects the fields below */
	volatile int			acquired;
	struct ww_acquire_ctx		*ctx;		/* context of current holder */
	volatile struct proc		*owner;
};

#define DEFINE_WW_CLASS(classname)	\
	struct ww_class classname = {	\
		.stamp = 0,		\
		.name = #classname	\
	}

/*
 * Wait/die classes are not implemented separately; this expands to the
 * same thing as DEFINE_WW_CLASS.
 */
#define DEFINE_WD_CLASS(classname)	\
	struct ww_class classname = {	\
		.stamp = 0,		\
		.name = #classname	\
	}

static inline void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) {
	/* Hand out a fresh, strictly increasing stamp; older contexts win. */
	ctx->stamp = __sync_fetch_and_add(&ww_class->stamp, 1);
	ctx->ww_class = ww_class;
}

static inline void
ww_acquire_done(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_acquire_fini(__unused struct ww_acquire_ctx *ctx) {
}

static inline void
ww_mutex_init(struct ww_mutex *lock, __unused struct ww_class *ww_class) {
	mtx_init(&lock->base, IPL_NONE);
	lock->acquired = 0;
	lock->ctx = NULL;
	lock->owner = NULL;
}

static inline bool
ww_mutex_is_locked(struct ww_mutex *lock) {
	bool res = false;

	mtx_enter(&lock->base);
	if (lock->acquired > 0)
		res = true;
	mtx_leave(&lock->base);
	return res;
}

/*
 * Return 1 if the lock could be acquired, else 0 (contended).
 */
static inline int
ww_mutex_trylock(struct ww_mutex *lock) {
	int res = 0;

	mtx_enter(&lock->base);
	/*
	 * In case no one holds the ww_mutex yet, we acquire it.
	 */
	if (lock->acquired == 0) {
		KASSERT(lock->ctx == NULL);
		lock->acquired = 1;
		lock->owner = curproc;
		res = 1;
	}
	mtx_leave(&lock->base);
	return res;
}
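
/*
 * Example (an illustrative sketch, not from the Linux API): take the
 * lock opportunistically and skip the work when it is contended.
 * Note that trylock acquires without a ww_acquire_ctx.
 *
 *	if (ww_mutex_trylock(&my_lock)) {
 *		... critical section ...
 *		ww_mutex_unlock(&my_lock);
 *	}
 */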

/*
 * When `slow` is `true`, it will always block if the ww_mutex is contended.
 * It is assumed that the caller will not hold any (ww_mutex) resources when
 * calling the slow path, as this could lead to deadlocks.
 *
 * When `intr` is `true`, the sleep will be interruptible.
 */
static inline int
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx, bool slow, bool intr) {
	int err;

	mtx_enter(&lock->base);
	for (;;) {
		/*
		 * In case no one holds the ww_mutex yet, we acquire it.
		 */
		if (lock->acquired == 0) {
			KASSERT(lock->ctx == NULL);
			lock->acquired = 1;
			lock->ctx = ctx;
			lock->owner = curproc;
			err = 0;
			break;
		}
		/*
		 * In case we already hold the ww_mutex, return -EALREADY.
		 */
		else if (lock->owner == curproc) {
			err = -EALREADY;
			break;
		}
		/*
		 * This is the contention case where the ww_mutex is
		 * already held by another context.
		 */
		else {
			/*
			 * We sleep in three cases:
			 *
			 * - We are in the slow path (first lock to obtain).
			 *
			 * - No context was specified. We assume a single
			 *   resource, so there is no danger of a deadlock.
			 *
			 * - An `older` process (`ctx`) tries to acquire a
			 *   lock already held by a `younger` process.
			 *   We put the `older` process to sleep until
			 *   the `younger` process gives up all of its
			 *   resources.
			 */
			if (slow || ctx == NULL ||
			    (lock->ctx && ctx->stamp < lock->ctx->stamp)) {
				KASSERT(!cold);
				int s = msleep_nsec(lock, &lock->base,
				    intr ? PCATCH : 0,
				    ctx ? ctx->ww_class->name : "ww_mutex_lock",
				    INFSLP);
				if (intr && (s == EINTR || s == ERESTART)) {
					/* XXX: Should we handle ERESTART? */
					err = -EINTR;
					break;
				}
			}
			/*
			 * If a `younger` process tries to acquire a lock
			 * already held by an `older` process, we `wound` it,
			 * i.e. we return -EDEADLK because there is a potential
			 * risk of a deadlock. The `younger` process then
			 * should give up all of its resources and try again
			 * to acquire the lock in question, this time in a
			 * blocking manner.
			 */
			else {
				err = -EDEADLK;
				break;
			}
		}
	} /* for */
	mtx_leave(&lock->base);
	return err;
}
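
/*
 * How the wound/wait protocol plays out across several locks, as a
 * hedged sketch (`a`, `b`, `ctx` and `my_class` are illustrative names):
 * a -EDEADLK return means we were `wounded`, so we drop everything we
 * hold, block on the contended lock via the slow path, and retry.
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	if (ww_mutex_lock(&a, &ctx) == -EDEADLK)
 *		ww_mutex_lock_slow(&a, &ctx);	(we hold nothing yet, just block)
 *	if (ww_mutex_lock(&b, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&a);		(back off: release what we hold)
 *		ww_mutex_lock_slow(&b, &ctx);	(block on the contended lock)
 *		... retry acquiring `a`, typically in a loop ...
 *	}
 *	ww_acquire_done(&ctx);
 *	... critical section, then unlock both and ww_acquire_fini(&ctx) ...
 */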

/* Acquire `lock` in the context `ctx`; may return -EALREADY or -EDEADLK. */
static inline int
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, false);
}

/* Slow path: block unconditionally; the caller must not hold any ww_mutex. */
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	(void)__ww_mutex_lock(lock, ctx, true, false);
}

/* As ww_mutex_lock(), but the sleep can be interrupted (-EINTR). */
static inline int
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, false, true);
}

/* As ww_mutex_lock_slow(), but the sleep can be interrupted (-EINTR). */
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
	return __ww_mutex_lock(lock, ctx, true, true);
}

static inline void
ww_mutex_unlock(struct ww_mutex *lock) {
	mtx_enter(&lock->base);
	KASSERT(lock->owner == curproc);
	KASSERT(lock->acquired == 1);

	lock->acquired = 0;
	lock->ctx = NULL;
	lock->owner = NULL;
	mtx_leave(&lock->base);
	/* Wake up anyone sleeping in __ww_mutex_lock(). */
	wakeup(lock);
}

static inline void
ww_mutex_destroy(struct ww_mutex *lock) {
	KASSERT(lock->acquired == 0);
	KASSERT(lock->ctx == NULL);
	KASSERT(lock->owner == NULL);
}

#endif	/* _LINUX_WW_MUTEX_H_ */