xref: /dragonfly/sys/dev/drm/ttm/ttm_lock.c (revision 0b29ed9d)
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_module.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

#define TTM_WRITE_LOCK_PENDING    (1 << 0)
#define TTM_VT_LOCK_PENDING       (1 << 1)
#define TTM_SUSPEND_LOCK_PENDING  (1 << 2)
#define TTM_VT_LOCK               (1 << 3)
#define TTM_SUSPEND_LOCK          (1 << 4)

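/*
 * Lock state: lock->rw counts active readers when positive, is 0 when the
 * lock is idle and -1 when write-locked. lock->flags tracks held and pending
 * exclusive modes (write, vt and suspend). All state changes are made while
 * holding lock->lock, and waiters sleep on lock->queue.
 */

/*
 * Initialize a ttm_lock to the unlocked state.
 */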
void ttm_lock_init(struct ttm_lock *lock)
{
	lockinit(&lock->lock, "ttmlk", 0, LK_CANRECURSE);
	init_waitqueue_head(&lock->queue);
	lock->rw = 0;
	lock->flags = 0;
	lock->kill_takers = false;
	lock->signal = SIGKILL;
}
EXPORT_SYMBOL(ttm_lock_init);

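/*
 * Drop a read lock and wake up all waiters once the last reader is gone.
 */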
void ttm_read_unlock(struct ttm_lock *lock)
{
	lockmgr(&lock->lock, LK_EXCLUSIVE);
	if (--lock->rw == 0)
		wake_up_all(&lock->queue);
	lockmgr(&lock->lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_read_unlock);

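/*
 * One attempt at taking a read lock, used as the wait condition in
 * ttm_read_lock(). Succeeds only if the lock is not write-locked and no
 * exclusive mode is held or pending. If kill_takers is set, the caller is
 * sent lock->signal instead.
 */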
static bool __ttm_read_lock(struct ttm_lock *lock)
{
	bool locked = false;

	lockmgr(&lock->lock, LK_EXCLUSIVE);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, curproc, 0);
		lockmgr(&lock->lock, LK_RELEASE);
		return false;
	}
	if (lock->rw >= 0 && lock->flags == 0) {
		++lock->rw;
		locked = true;
	}
	lockmgr(&lock->lock, LK_RELEASE);
	return locked;
}

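/*
 * Take a read lock, sleeping until it can be granted. Returns 0, or
 * -ERESTARTSYS if an interruptible wait was broken by a signal.
 */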
int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;

	if (interruptible)
		ret = wait_event_interruptible(lock->queue,
					       __ttm_read_lock(lock));
	else
		wait_event(lock->queue, __ttm_read_lock(lock));
	return ret;
}
EXPORT_SYMBOL(ttm_read_lock);

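/*
 * One attempt at a read lock for ttm_read_trylock(). *locked reports
 * whether the read lock was taken; the return value tells the caller to
 * stop waiting, either because the lock was taken or because it is
 * write-locked and the trylock should fail rather than sleep.
 */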
static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
	bool block = true;

	*locked = false;

	lockmgr(&lock->lock, LK_EXCLUSIVE);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, curproc, 0);
		lockmgr(&lock->lock, LK_RELEASE);
		return false;
	}
	if (lock->rw >= 0 && lock->flags == 0) {
		++lock->rw;
		block = false;
		*locked = true;
	} else if (lock->flags == 0) {
		block = false;
	}
	lockmgr(&lock->lock, LK_RELEASE);

	return !block;
}

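/*
 * Try to take a read lock. Sleeps while an exclusive mode is held or
 * pending via lock->flags, but returns -EBUSY instead of sleeping if the
 * lock is currently write-locked. Returns 0 on success, or -ERESTARTSYS
 * if an interruptible wait was broken by a signal.
 */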
int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;
	bool locked;

	if (interruptible)
		ret = wait_event_interruptible
			(lock->queue, __ttm_read_trylock(lock, &locked));
	else
		wait_event(lock->queue, __ttm_read_trylock(lock, &locked));

	if (unlikely(ret != 0)) {
		BUG_ON(locked);
		return ret;
	}

	return (locked) ? 0 : -EBUSY;
}

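/*
 * Drop the write lock and wake up all waiters.
 */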
void ttm_write_unlock(struct ttm_lock *lock)
{
	lockmgr(&lock->lock, LK_EXCLUSIVE);
	lock->rw = 0;
	wake_up_all(&lock->queue);
	lockmgr(&lock->lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_write_unlock);

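/*
 * One attempt at taking the write lock. Succeeds only when there are no
 * readers or writer and no other exclusive mode is held or pending;
 * otherwise the write-pending flag is set so that new readers are held off
 * while we wait.
 */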
static bool __ttm_write_lock(struct ttm_lock *lock)
{
	bool locked = false;

	lockmgr(&lock->lock, LK_EXCLUSIVE);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, curproc, 0);
		lockmgr(&lock->lock, LK_RELEASE);
		return false;
	}
	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
		lock->rw = -1;
		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
		locked = true;
	} else {
		lock->flags |= TTM_WRITE_LOCK_PENDING;
	}
	lockmgr(&lock->lock, LK_RELEASE);
	return locked;
}

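/*
 * Take the write lock, sleeping until it can be granted. If an
 * interruptible wait is broken by a signal, the write-pending flag is
 * cleared and other waiters are woken before returning -ERESTARTSYS.
 */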
int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;

	if (interruptible) {
		ret = wait_event_interruptible(lock->queue,
					       __ttm_write_lock(lock));
		if (unlikely(ret != 0)) {
			lockmgr(&lock->lock, LK_EXCLUSIVE);
			lock->flags &= ~TTM_WRITE_LOCK_PENDING;
			wake_up_all(&lock->queue);
			lockmgr(&lock->lock, LK_RELEASE);
		}
	} else
		wait_event(lock->queue, __ttm_write_lock(lock));

	return ret;
}
EXPORT_SYMBOL(ttm_write_lock);

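/*
 * Downgrade the held write lock to a single read lock held by the caller
 * and wake up other waiters.
 */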
void ttm_write_lock_downgrade(struct ttm_lock *lock);
void ttm_write_lock_downgrade(struct ttm_lock *lock)
{
	lockmgr(&lock->lock, LK_EXCLUSIVE);
	lock->rw = 1;
	wake_up_all(&lock->queue);
	lockmgr(&lock->lock, LK_RELEASE);
}

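/*
 * Clear the vt-lock flag and wake up all waiters. Returns -EINVAL if the
 * lock was not vt-locked.
 */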
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
	int ret = 0;

	lockmgr(&lock->lock, LK_EXCLUSIVE);
	if (unlikely(!(lock->flags & TTM_VT_LOCK)))
		ret = -EINVAL;
	lock->flags &= ~TTM_VT_LOCK;
	wake_up_all(&lock->queue);
	lockmgr(&lock->lock, LK_RELEASE);

	return ret;
}

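/*
 * Base-object destructor: releases the vt lock when the holder's reference
 * goes away, for instance because the client died while holding it.
 */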
static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
	int ret;

	*p_base = NULL;
	ret = __ttm_vt_unlock(lock);
	BUG_ON(ret != 0);
}

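/*
 * One attempt at taking the vt lock. Succeeds as soon as there are no
 * readers or writer; otherwise the vt-pending flag is set to hold off new
 * readers while we wait.
 */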
static bool __ttm_vt_lock(struct ttm_lock *lock)
{
	bool locked = false;

	lockmgr(&lock->lock, LK_EXCLUSIVE);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_VT_LOCK_PENDING;
		lock->flags |= TTM_VT_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_VT_LOCK_PENDING;
	}
	lockmgr(&lock->lock, LK_RELEASE);
	return locked;
}

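/*
 * Take the vt lock on behalf of the client represented by @tfile. Once the
 * lock is granted, a ttm base object is registered so the lock is released
 * automatically if the client dies while holding it. Returns 0 on success,
 * -ERESTARTSYS if an interruptible wait was broken by a signal, or an error
 * from the base-object setup (in which case the lock is dropped again).
 */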
int ttm_vt_lock(struct ttm_lock *lock,
		bool interruptible,
		struct ttm_object_file *tfile)
{
	int ret = 0;

	if (interruptible) {
		ret = wait_event_interruptible(lock->queue,
					       __ttm_vt_lock(lock));
		if (unlikely(ret != 0)) {
			lockmgr(&lock->lock, LK_EXCLUSIVE);
			lock->flags &= ~TTM_VT_LOCK_PENDING;
			wake_up_all(&lock->queue);
			lockmgr(&lock->lock, LK_RELEASE);
			return ret;
		}
	} else
		wait_event(lock->queue, __ttm_vt_lock(lock));

	/*
	 * Add a base-object, the destructor of which will
	 * make sure the lock is released if the client dies
	 * while holding it.
	 */

	ret = ttm_base_object_init(tfile, &lock->base, false,
				   ttm_lock_type, &ttm_vt_lock_remove, NULL);
	if (ret)
		(void)__ttm_vt_unlock(lock);
	else
		lock->vt_holder = tfile;

	return ret;
}
EXPORT_SYMBOL(ttm_vt_lock);

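/*
 * Release the vt lock by dropping the reference on its base object, which
 * ends up calling ttm_vt_lock_remove().
 */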
int ttm_vt_unlock(struct ttm_lock *lock)
{
	return ttm_ref_object_base_unref(lock->vt_holder,
					 lock->base.hash.key, TTM_REF_USAGE);
}
EXPORT_SYMBOL(ttm_vt_unlock);

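/*
 * Drop the suspend lock and wake up all waiters.
 */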
void ttm_suspend_unlock(struct ttm_lock *lock)
{
	lockmgr(&lock->lock, LK_EXCLUSIVE);
	lock->flags &= ~TTM_SUSPEND_LOCK;
	wake_up_all(&lock->queue);
	lockmgr(&lock->lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_suspend_unlock);

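/*
 * One attempt at taking the suspend lock. Succeeds only when there are no
 * readers or writer; otherwise the suspend-pending flag is set to hold off
 * new readers while we wait.
 */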
static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
	bool locked = false;

	lockmgr(&lock->lock, LK_EXCLUSIVE);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
		lock->flags |= TTM_SUSPEND_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_SUSPEND_LOCK_PENDING;
	}
	lockmgr(&lock->lock, LK_RELEASE);
	return locked;
}

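/*
 * Take the suspend lock, sleeping uninterruptibly until it is granted.
 */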
void ttm_suspend_lock(struct ttm_lock *lock)
{
	wait_event(lock->queue, __ttm_suspend_lock(lock));
}
EXPORT_SYMBOL(ttm_suspend_lock);