xref: /dragonfly/sys/dev/drm/ttm/ttm_lock.c (revision 7d3e9a5b)
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_module.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/module.h>

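/*
 * Lock state: lock->rw counts the readers currently holding the lock,
 * is 0 when the lock is idle and -1 while write-locked.  The flag bits
 * below record pending writers and the vt/suspend exclusive modes;
 * readers only get in while all flags are clear.
 */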
#define TTM_WRITE_LOCK_PENDING    (1 << 0)
#define TTM_VT_LOCK_PENDING       (1 << 1)
#define TTM_SUSPEND_LOCK_PENDING  (1 << 2)
#define TTM_VT_LOCK               (1 << 3)
#define TTM_SUSPEND_LOCK          (1 << 4)

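/*
 * ttm_lock_init() - put the lock into its initial, fully unlocked state:
 * no readers or writer, no flags set and no kill-on-take behaviour.
 */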
void ttm_lock_init(struct ttm_lock *lock)
{
	spin_init(&lock->lock, "ttmll");
	init_waitqueue_head(&lock->queue);
	lock->rw = 0;
	lock->flags = 0;
	lock->kill_takers = false;
	lock->signal = SIGKILL;
}
EXPORT_SYMBOL(ttm_lock_init);

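/*
 * ttm_read_unlock() - drop one read lock and wake all waiters once the
 * last reader is gone.
 */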
void ttm_read_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	if (--lock->rw == 0)
		wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_read_unlock);

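/*
 * __ttm_read_lock() - wait_event() condition: take a read lock if the lock
 * is not write-locked and no exclusive mode is held or pending.  When
 * kill_takers is set, the configured signal is sent to the calling process
 * instead and the lock is not taken.
 */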
static bool __ttm_read_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current->dfly_td->td_proc, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw >= 0 && lock->flags == 0) {
		++lock->rw;
		locked = true;
	}
	spin_unlock(&lock->lock);
	return locked;
}

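/*
 * ttm_read_lock() - block, interruptibly if requested, until a read lock
 * has been acquired.  Returns 0 on success or the error from an
 * interrupted wait.
 */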
int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;

	if (interruptible) {
		kprintf("#2#begin#: ttm_read_lock.wait_event_interruptible\n");
		ret = wait_event_interruptible(lock->queue,
					       __ttm_read_lock(lock));
		kprintf("#2#end#: ttm_read_lock.wait_event_interruptible\n");
	} else {
		kprintf("#1#begin#: ttm_read_lock.wait_event\n");
		wait_event(lock->queue, __ttm_read_lock(lock));
		kprintf("#1#end#: ttm_read_lock.wait_event\n");
	}
	return ret;
}
EXPORT_SYMBOL(ttm_read_lock);

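/*
 * Usage sketch (not taken from this file; dev_priv and its reserve_lock
 * member are hypothetical driver names): a command-submission path would
 * typically bracket its work with the read lock, e.g.
 *
 *	ret = ttm_read_lock(&dev_priv->reserve_lock, true);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...submit work...
 *	ttm_read_unlock(&dev_priv->reserve_lock);
 */

/*
 * __ttm_read_trylock() - wait_event() condition for ttm_read_trylock().
 * Takes the read lock and sets *locked when possible; returns true (stop
 * waiting) either on success or when the lock is plainly write-locked, in
 * which case the caller fails with -EBUSY.  Keeps waiting only while an
 * exclusive mode is pending or held.
 */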
static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
	bool block = true;

	*locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current->dfly_td->td_proc, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw >= 0 && lock->flags == 0) {
		++lock->rw;
		block = false;
		*locked = true;
	} else if (lock->flags == 0) {
		block = false;
	}
	spin_unlock(&lock->lock);

	return !block;
}

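/*
 * ttm_read_trylock() - try to take a read lock, waiting out any pending
 * or held exclusive modes, but returning -EBUSY at once if the lock is
 * write-locked.
 */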
int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;
	bool locked;

	if (interruptible) {
		kprintf("#4#begin#: ttm_read_trylock.wait_event_interruptible\n");
		ret = wait_event_interruptible
			(lock->queue, __ttm_read_trylock(lock, &locked));
		kprintf("#4#end#: ttm_read_trylock.wait_event_interruptible\n");
	} else {
		kprintf("#3#begin#: ttm_read_trylock.wait_event\n");
		wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
		kprintf("#3#end#: ttm_read_trylock.wait_event\n");
	}

	if (unlikely(ret != 0)) {
		BUG_ON(locked);
		return ret;
	}

	return (locked) ? 0 : -EBUSY;
}

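/*
 * ttm_write_unlock() - release the write lock and wake all waiters.
 */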
void ttm_write_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->rw = 0;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_write_unlock);

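/*
 * __ttm_write_lock() - wait_event() condition: take the write lock
 * (rw = -1) once there are no readers and no exclusive flags other than
 * our own pending bit; otherwise mark a write as pending so new readers
 * back off.
 */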
static bool __ttm_write_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current->dfly_td->td_proc, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
		lock->rw = -1;
		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
		locked = true;
	} else {
		lock->flags |= TTM_WRITE_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

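/*
 * ttm_write_lock() - block until the write lock is held.  If an
 * interruptible wait is broken by a signal, the pending bit is cleared
 * and the other waiters are woken so the lock cannot stall.
 */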
int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;

	if (interruptible) {
		kprintf("#6#begin#: ttm_write_lock.wait_event_interruptible\n");
		ret = wait_event_interruptible(lock->queue,
					       __ttm_write_lock(lock));
		kprintf("#6#end#: ttm_write_lock.wait_event_interruptible\n");
		if (unlikely(ret != 0)) {
			spin_lock(&lock->lock);
			lock->flags &= ~TTM_WRITE_LOCK_PENDING;
			wake_up_all(&lock->queue);
			spin_unlock(&lock->lock);
		}
	} else {
		kprintf("#5#begin#: ttm_write_lock.wait_event\n");
		wait_event(lock->queue, __ttm_write_lock(lock));
		kprintf("#5#end#: ttm_write_lock.wait_event\n");
	}

	return ret;
}
EXPORT_SYMBOL(ttm_write_lock);

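/*
 * __ttm_vt_unlock() - clear the vt-lock flag and wake all waiters.
 * Returns -EINVAL if the lock was not actually vt-locked.
 */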
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
	int ret = 0;

	spin_lock(&lock->lock);
	if (unlikely(!(lock->flags & TTM_VT_LOCK)))
		ret = -EINVAL;
	lock->flags &= ~TTM_VT_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);

	return ret;
}

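/*
 * ttm_vt_lock_remove() - base-object destructor: drops the vt lock when
 * the last reference to the lock's base object goes away, e.g. because
 * the holder's file was closed.
 */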
static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
	int ret;

	*p_base = NULL;
	ret = __ttm_vt_unlock(lock);
	BUG_ON(ret != 0);
}

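/*
 * __ttm_vt_lock() - wait_event() condition: take the vt lock once all
 * readers are gone, otherwise mark it pending to keep new readers out.
 */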
static bool __ttm_vt_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_VT_LOCK_PENDING;
		lock->flags |= TTM_VT_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_VT_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

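/*
 * ttm_vt_lock() - take the vt lock on behalf of a client file and tie it
 * to a base object so it is released automatically if the client dies
 * while holding it.
 */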
int ttm_vt_lock(struct ttm_lock *lock,
		bool interruptible,
		struct ttm_object_file *tfile)
{
	int ret = 0;

	if (interruptible) {
		kprintf("#8#begin#: ttm_vt_lock.wait_event_interruptible\n");
		ret = wait_event_interruptible(lock->queue,
					       __ttm_vt_lock(lock));
		kprintf("#8#end#: ttm_vt_lock.wait_event_interruptible\n");
		if (unlikely(ret != 0)) {
			spin_lock(&lock->lock);
			lock->flags &= ~TTM_VT_LOCK_PENDING;
			wake_up_all(&lock->queue);
			spin_unlock(&lock->lock);
			return ret;
		}
	} else {
		kprintf("#7#begin#: ttm_vt_lock.wait_event\n");
		wait_event(lock->queue, __ttm_vt_lock(lock));
		kprintf("#7#end#: ttm_vt_lock.wait_event\n");
	}

	/*
	 * Add a base-object, the destructor of which will
	 * make sure the lock is released if the client dies
	 * while holding it.
	 */

	ret = ttm_base_object_init(tfile, &lock->base, false,
				   ttm_lock_type, &ttm_vt_lock_remove, NULL);
	if (ret)
		(void)__ttm_vt_unlock(lock);
	else
		lock->vt_holder = tfile;

	return ret;
}
EXPORT_SYMBOL(ttm_vt_lock);

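/*
 * ttm_vt_unlock() - drop the holder's reference to the lock's base
 * object; the actual unlock happens in ttm_vt_lock_remove().
 */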
int ttm_vt_unlock(struct ttm_lock *lock)
{
	return ttm_ref_object_base_unref(lock->vt_holder,
					 lock->base.hash.key, TTM_REF_USAGE);
}
EXPORT_SYMBOL(ttm_vt_unlock);

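/*
 * ttm_suspend_unlock() - clear the suspend-lock flag and wake all waiters.
 */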
void ttm_suspend_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->flags &= ~TTM_SUSPEND_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_suspend_unlock);

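/*
 * __ttm_suspend_lock() - wait_event() condition: take the suspend lock
 * once all readers are gone, otherwise mark it pending so new readers
 * back off.
 */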
static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
		lock->flags |= TTM_SUSPEND_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_SUSPEND_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

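/*
 * ttm_suspend_lock() - wait uninterruptibly until the suspend lock is
 * held, blocking out all readers, typically around device suspend.
 */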
void ttm_suspend_lock(struct ttm_lock *lock)
{
	kprintf("#9#begin#: ttm_suspend_lock.wait_event\n");
	wait_event(lock->queue, __ttm_suspend_lock(lock));
	kprintf("#9#end#: ttm_suspend_lock.wait_event\n");
}
EXPORT_SYMBOL(ttm_suspend_lock);