1 /*
2 * Copyright (C) 2014 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <drm/drmP.h>
25 #include <drm/drm_crtc.h>
26 #include <drm/drm_modeset_lock.h>
27
28 /**
29 * DOC: kms locking
30 *
31 * As KMS moves toward more fine grained locking, and atomic ioctl where
32 * userspace can indirectly control locking order, it becomes necessary
33 * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
34 * the locking is more distributed around the driver code, we want a bit
35 * of extra utility/tracking out of our acquire-ctx. This is provided
36 * by &struct drm_modeset_lock and &struct drm_modeset_acquire_ctx.
37 *
38 * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
39 *
40 * The basic usage pattern is to::
41 *
42 * drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
43 * retry:
44 * foreach (lock in random_ordered_set_of_locks) {
45 * ret = drm_modeset_lock(lock, ctx)
46 * if (ret == -EDEADLK) {
47 * ret = drm_modeset_backoff(ctx);
48 * if (!ret)
49 * goto retry;
50 * }
51 * if (ret)
52 * goto out;
53 * }
54 * ... do stuff ...
55 * out:
56 * drm_modeset_drop_locks(ctx);
57 * drm_modeset_acquire_fini(ctx);
58 *
59 * If all that is needed is a single modeset lock, then the &struct
60 * drm_modeset_acquire_ctx is not needed and the locking can be simplified
61 * by passing a NULL instead of ctx in the drm_modeset_lock() call or
62 * calling drm_modeset_lock_single_interruptible(). To unlock afterwards
63 * call drm_modeset_unlock().
64 *
65 * On top of these per-object locks using &ww_mutex there's also an overall
66 * &drm_mode_config.mutex, for protecting everything else. Mostly this means
67 * probe state of connectors, and preventing hotplug add/removal of connectors.
68 *
69 * Finally there's a bunch of dedicated locks to protect drm core internal
70 * lists and lookup data structures.
71 */
72
/* Single ww_class shared by all modeset locks: locks initialized against the
 * same class can be acquired in any order under an acquire context, with
 * wound/wait deadlock detection between them. */
static DEFINE_WW_CLASS(crtc_ww_class);
74
75 /**
76 * drm_modeset_lock_all - take all modeset locks
77 * @dev: DRM device
78 *
79 * This function takes all modeset locks, suitable where a more fine-grained
80 * scheme isn't (yet) implemented. Locks must be dropped by calling the
81 * drm_modeset_unlock_all() function.
82 *
83 * This function is deprecated. It allocates a lock acquisition context and
84 * stores it in &drm_device.mode_config. This facilitate conversion of
85 * existing code because it removes the need to manually deal with the
86 * acquisition context, but it is also brittle because the context is global
87 * and care must be taken not to nest calls. New code should use the
88 * drm_modeset_lock_all_ctx() function and pass in the context explicitly.
89 */
drm_modeset_lock_all(struct drm_device * dev)90 void drm_modeset_lock_all(struct drm_device *dev)
91 {
92 struct drm_mode_config *config = &dev->mode_config;
93 struct drm_modeset_acquire_ctx *ctx;
94 int ret;
95
96 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
97 if (WARN_ON(!ctx))
98 return;
99
100 mutex_lock(&config->mutex);
101
102 drm_modeset_acquire_init(ctx, 0);
103
104 retry:
105 ret = drm_modeset_lock_all_ctx(dev, ctx);
106 if (ret < 0) {
107 if (ret == -EDEADLK) {
108 drm_modeset_backoff(ctx);
109 goto retry;
110 }
111
112 drm_modeset_acquire_fini(ctx);
113 kfree(ctx);
114 return;
115 }
116
117 WARN_ON(config->acquire_ctx);
118
119 /*
120 * We hold the locks now, so it is safe to stash the acquisition
121 * context for drm_modeset_unlock_all().
122 */
123 config->acquire_ctx = ctx;
124
125 drm_warn_on_modeset_not_all_locked(dev);
126 }
127 EXPORT_SYMBOL(drm_modeset_lock_all);
128
129 /**
130 * drm_modeset_unlock_all - drop all modeset locks
131 * @dev: DRM device
132 *
133 * This function drops all modeset locks taken by a previous call to the
134 * drm_modeset_lock_all() function.
135 *
136 * This function is deprecated. It uses the lock acquisition context stored
137 * in &drm_device.mode_config. This facilitates conversion of existing
138 * code because it removes the need to manually deal with the acquisition
139 * context, but it is also brittle because the context is global and care must
140 * be taken not to nest calls. New code should pass the acquisition context
141 * directly to the drm_modeset_drop_locks() function.
142 */
drm_modeset_unlock_all(struct drm_device * dev)143 void drm_modeset_unlock_all(struct drm_device *dev)
144 {
145 struct drm_mode_config *config = &dev->mode_config;
146 struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
147
148 if (WARN_ON(!ctx))
149 return;
150
151 config->acquire_ctx = NULL;
152 drm_modeset_drop_locks(ctx);
153 drm_modeset_acquire_fini(ctx);
154
155 kfree(ctx);
156
157 mutex_unlock(&dev->mode_config.mutex);
158 }
159 EXPORT_SYMBOL(drm_modeset_unlock_all);
160
/**
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
 * @dev: device
 *
 * Useful as a debug assert. Warns unless every per-CRTC lock, the
 * connection_mutex and the global mode_config mutex are all held.
 */
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	/* Locking is currently fubar in the panic handler. */
#ifdef __DragonFly__
	if (panicstr)
		return;
#else
	if (oops_in_progress)
		return;
#endif

	drm_for_each_crtc(crtc, dev)
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
187
188 /**
189 * drm_modeset_acquire_init - initialize acquire context
190 * @ctx: the acquire context
191 * @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
192 *
193 * When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
194 * all calls to drm_modeset_lock() will perform an interruptible
195 * wait.
196 */
drm_modeset_acquire_init(struct drm_modeset_acquire_ctx * ctx,uint32_t flags)197 void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
198 uint32_t flags)
199 {
200 memset(ctx, 0, sizeof(*ctx));
201 ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
202 INIT_LIST_HEAD(&ctx->locked);
203
204 if (flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
205 ctx->interruptible = true;
206 }
207 EXPORT_SYMBOL(drm_modeset_acquire_init);
208
/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 *
 * Releases the ww acquire context. Per ww_mutex rules, all locks taken
 * through @ctx must have been dropped first (see drm_modeset_drop_locks()).
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);
218
/**
 * drm_modeset_drop_locks - drop all locks
 * @ctx: the acquire context
 *
 * Drop all locks currently held against this acquire context.
 */
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
	/* A pending contention must be resolved with drm_modeset_backoff()
	 * before the locks are dropped. */
	WARN_ON(ctx->contended);
	while (!list_empty(&ctx->locked)) {
		struct drm_modeset_lock_info *info;

		info = list_first_entry(&ctx->locked,
				struct drm_modeset_lock_info, ctx_entry);

		/* drm_modeset_unlock() unlinks and frees the lock's tracking
		 * entry, shrinking ctx->locked until the loop terminates. */
		drm_modeset_unlock(info->lock);
	}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);
238
/*
 * modeset_lock - acquire a single modeset lock against an acquire context
 * @lock: the lock to take
 * @ctx: acquire context tracking the lock
 * @interruptible: if true, use interruptible ww_mutex waits
 * @slow: if true, use the post-backoff slow-path lock calls
 *
 * Returns 0 on success, -EBUSY when @ctx is trylock-only and the lock is
 * contended, -EDEADLK when lock ordering requires the caller to back off
 * (the contended lock is remembered in ctx->contended), or -ERESTARTSYS
 * when an interruptible wait was interrupted.
 */
static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	/* A previous -EDEADLK must be resolved via drm_modeset_backoff()
	 * before taking more locks. */
	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
#if 0
		lockdep_assert_held(&ctx->ww_ctx);
#endif

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ret = ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		if (ret)
			kprintf("DRM: Warning: modeset_lock SLOW failed %d\n", ret);
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine. For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
	}
	if (ret == 0) {
		struct drm_modeset_lock_info *info;

		/* __GFP_NOFAIL: the result is dereferenced unconditionally
		 * below, and the previous code had no NULL check. Matches the
		 * allocation policy in drm_modeset_lock_all(). */
		info = kzalloc(sizeof(*info), GFP_KERNEL | __GFP_NOFAIL);
		INIT_LIST_HEAD(&info->ctx_entry);
		INIT_LIST_HEAD(&info->lock_entry);
		info->lock = lock;
		info->ctx = ctx;
		list_add(&info->ctx_entry, &ctx->locked);
		list_add(&info->lock_entry, &lock->head);
	}

	return ret;
}
292
/**
 * drm_modeset_backoff - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
 * you must call this function to drop all currently held locks and
 * block until the contended lock becomes available.
 *
 * This function returns 0 on success, or -ERESTARTSYS if this context
 * is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
 * wait has been interrupted.
 */
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_modeset_lock *contended = ctx->contended;

	/* Consume the contention marker before re-acquiring. */
	ctx->contended = NULL;

	/* Backoff without a preceding -EDEADLK is a caller bug, but
	 * harmless: there is nothing to wait for. */
	if (WARN_ON(!contended))
		return 0;

	drm_modeset_drop_locks(ctx);

	/* Slow path: with all other locks dropped, block until the
	 * previously contended lock is available. */
	return modeset_lock(contended, ctx, ctx->interruptible, true);
}
EXPORT_SYMBOL(drm_modeset_backoff);
319
/**
 * drm_modeset_lock_init - initialize lock
 * @lock: lock to init
 *
 * Initializes the ww_mutex against the shared modeset ww_class and the
 * list head used to track acquire-context ownership of this lock.
 */
void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
	ww_mutex_init(&lock->mutex, &crtc_ww_class);
	INIT_LIST_HEAD(&lock->head);
}
EXPORT_SYMBOL(drm_modeset_lock_init);
330
331 /**
332 * drm_modeset_lock - take modeset lock
333 * @lock: lock to take
334 * @ctx: acquire ctx
335 *
336 * If @ctx is not NULL, then its ww acquire context is used and the
337 * lock will be tracked by the context and can be released by calling
338 * drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
339 * deadlock scenario has been detected and it is an error to attempt
340 * to take any more locks without first calling drm_modeset_backoff().
341 *
342 * If the @ctx is not NULL and initialized with
343 * %DRM_MODESET_ACQUIRE_INTERRUPTIBLE, this function will fail with
344 * -ERESTARTSYS when interrupted.
345 *
346 * If @ctx is NULL then the function call behaves like a normal,
347 * uninterruptible non-nesting mutex_lock() call.
348 */
drm_modeset_lock(struct drm_modeset_lock * lock,struct drm_modeset_acquire_ctx * ctx)349 int drm_modeset_lock(struct drm_modeset_lock *lock,
350 struct drm_modeset_acquire_ctx *ctx)
351 {
352 if (ctx)
353 return modeset_lock(lock, ctx, ctx->interruptible, false);
354
355 ww_mutex_lock(&lock->mutex, NULL);
356 return 0;
357 }
358 EXPORT_SYMBOL(drm_modeset_lock);
359
/**
 * drm_modeset_lock_single_interruptible - take a single modeset lock
 * @lock: lock to take
 *
 * This function behaves as drm_modeset_lock() with a NULL context,
 * but performs interruptible waits.
 *
 * This function returns 0 on success, or -ERESTARTSYS when interrupted.
 */
int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
{
	/* NULL ww context: plain non-nesting lock, no acquire tracking. */
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);
374
375 /**
376 * drm_modeset_unlock - drop modeset lock
377 * @lock: lock to release
378 */
drm_modeset_unlock(struct drm_modeset_lock * lock)379 void drm_modeset_unlock(struct drm_modeset_lock *lock)
380 {
381 struct drm_modeset_lock_info *info;
382
383 /* undo in reverse order */
384 if (!list_empty(&lock->head)) {
385 info = list_last_entry(&lock->head,
386 struct drm_modeset_lock_info, lock_entry);
387 list_del_init(&info->lock_entry);
388 if (info->ctx)
389 list_del_init(&info->ctx_entry);
390 kfree(info);
391 }
392 ww_mutex_unlock(&lock->mutex);
393 }
394 EXPORT_SYMBOL(drm_modeset_unlock);
395
/**
 * drm_modeset_lock_all_ctx - take all modeset locks
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented.
 *
 * Unlike drm_modeset_lock_all(), it doesn't take the &drm_mode_config.mutex
 * since that lock isn't required for modeset state changes. Callers which
 * need to grab that lock too need to do so outside of the acquire context
 * @ctx.
 *
 * Locks acquired with this function should be released by calling the
 * drm_modeset_drop_locks() function on @ctx.
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int drm_modeset_lock_all_ctx(struct drm_device *dev,
		struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	int ret;

	/* On any failure (including -EDEADLK) locks taken so far stay held
	 * against @ctx; the caller backs off or drops them. */
	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_plane(plane, dev) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
440