xref: /openbsd/sys/dev/pci/drm/drm_modeset_lock.c (revision 1bb76ff1)
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_print.h>

/**
 * DOC: kms locking
 *
 * As KMS moves toward more fine-grained locking, and an atomic ioctl where
 * userspace can indirectly control locking order, it becomes necessary
 * to use &ww_mutex and acquire-contexts to avoid deadlocks.  But because
 * the locking is more distributed around the driver code, we want a bit
 * of extra utility/tracking out of our acquire-ctx.  This is provided
 * by &struct drm_modeset_lock and &struct drm_modeset_acquire_ctx.
 *
 * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.rst
 *
 * The basic usage pattern is to::
 *
 *     drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
 *     retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *         ret = drm_modeset_lock(lock, ctx)
 *         if (ret == -EDEADLK) {
 *             ret = drm_modeset_backoff(ctx);
 *             if (!ret)
 *                 goto retry;
 *         }
 *         if (ret)
 *             goto out;
 *     }
 *     ... do stuff ...
 *     out:
 *     drm_modeset_drop_locks(ctx);
 *     drm_modeset_acquire_fini(ctx);
 *
 * For convenience this control flow is implemented in
 * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
 * where all modeset locks need to be taken through drm_modeset_lock_all_ctx()
 * (a usage sketch follows this comment).
 *
 * If all that is needed is a single modeset lock, then the &struct
 * drm_modeset_acquire_ctx is not needed and the locking can be simplified
 * by passing a NULL instead of ctx in the drm_modeset_lock() call or
 * calling drm_modeset_lock_single_interruptible(). To unlock afterwards
 * call drm_modeset_unlock().
 *
 * On top of these per-object locks using &ww_mutex there's also an overall
 * &drm_mode_config.mutex, for protecting everything else. Mostly this means
 * probe state of connectors, and preventing hotplug add/removal of connectors.
 *
 * Finally there's a bunch of dedicated locks to protect drm core internal
 * lists and lookup data structures.
 */

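/*
 * A minimal usage sketch of the convenience pair mentioned above, assuming
 * the DRM_MODESET_LOCK_ALL_BEGIN()/DRM_MODESET_LOCK_ALL_END() macros declared
 * in <drm/drm_modeset_lock.h>; mydrv_update_everything() and the work done
 * under the locks are hypothetical placeholders::
 *
 *     int mydrv_update_everything(struct drm_device *dev)
 *     {
 *         struct drm_modeset_acquire_ctx ctx;
 *         int ret;
 *
 *         // handles drm_modeset_acquire_init(), drm_modeset_lock_all_ctx()
 *         // and the -EDEADLK retry loop
 *         DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
 *
 *         // ... touch modeset state while all per-object locks are held ...
 *
 *         // handles drm_modeset_backoff() on contention and
 *         // drm_modeset_acquire_fini()
 *         DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 *
 *         return ret;
 *     }
 */
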
static DEFINE_WW_CLASS(crtc_ww_class);

#if IS_ENABLED(CONFIG_DRM_DEBUG_MODESET_LOCK)
/*
 * Record up to 8 frames of the caller's stack in the stack depot, so that a
 * lock attempt made while a contended lock still awaits drm_modeset_backoff()
 * can be reported later.
 */
static noinline depot_stack_handle_t __drm_stack_depot_save(void)
{
	unsigned long entries[8];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

/* Print the recorded stack through the drm debug printer. */
static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
	struct drm_printer p = drm_debug_printer("drm_modeset_lock");
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	nr_entries = stack_depot_fetch(stack_depot, &entries);
	stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 2);

	drm_printf(&p, "attempting to lock a contended lock without backoff:\n%s", buf);

	kfree(buf);
}

static void __drm_stack_depot_init(void)
{
	stack_depot_init();
}
#else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
/* Stubs used when modeset lock debugging is disabled. */
static depot_stack_handle_t __drm_stack_depot_save(void)
{
	return 0;
}
static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
}
static void __drm_stack_depot_init(void)
{
}
#endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */

/**
 * drm_modeset_lock_all - take all modeset locks
 * @dev: DRM device
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented. Locks must be dropped by calling the
 * drm_modeset_unlock_all() function.
 *
 * This function is deprecated. It allocates a lock acquisition context and
 * stores it in &drm_device.mode_config. This facilitates conversion of
 * existing code because it removes the need to manually deal with the
 * acquisition context, but it is also brittle because the context is global
 * and care must be taken not to nest calls. New code should use the
 * drm_modeset_lock_all_ctx() function and pass in the context explicitly.
 */
void drm_modeset_lock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

#ifdef __linux__
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
#else
	ctx = kzalloc(sizeof(*ctx), M_WAITOK);
#endif
	if (WARN_ON(!ctx))
		return;

	mutex_lock(&config->mutex);

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, ctx);
	if (ret < 0) {
		if (ret == -EDEADLK) {
			drm_modeset_backoff(ctx);
			goto retry;
		}

		drm_modeset_acquire_fini(ctx);
		kfree(ctx);
		return;
	}
	ww_acquire_done(&ctx->ww_ctx);

	WARN_ON(config->acquire_ctx);

	/*
	 * We hold the locks now, so it is safe to stash the acquisition
	 * context for drm_modeset_unlock_all().
	 */
	config->acquire_ctx = ctx;

	drm_warn_on_modeset_not_all_locked(dev);
}
EXPORT_SYMBOL(drm_modeset_lock_all);

/**
 * drm_modeset_unlock_all - drop all modeset locks
 * @dev: DRM device
 *
 * This function drops all modeset locks taken by a previous call to the
 * drm_modeset_lock_all() function.
 *
 * This function is deprecated. It uses the lock acquisition context stored
 * in &drm_device.mode_config. This facilitates conversion of existing
 * code because it removes the need to manually deal with the acquisition
 * context, but it is also brittle because the context is global and care must
 * be taken not to nest calls. New code should pass the acquisition context
 * directly to the drm_modeset_drop_locks() function.
 */
void drm_modeset_unlock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	config->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);

	mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);

/**
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
 * @dev: device
 *
 * Useful as a debug assert.
 */
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	/* Locking is currently fubar in the panic handler. */
	if (oops_in_progress)
		return;

	drm_for_each_crtc(crtc, dev)
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);

/**
 * drm_modeset_acquire_init - initialize acquire context
 * @ctx: the acquire context
 * @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
 *
 * When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
 * all calls to drm_modeset_lock() will perform an interruptible
 * wait.
 */
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
		uint32_t flags)
{
	memset(ctx, 0, sizeof(*ctx));
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
	INIT_LIST_HEAD(&ctx->locked);

	if (flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
		ctx->interruptible = true;
}
EXPORT_SYMBOL(drm_modeset_acquire_init);

/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);

/**
 * drm_modeset_drop_locks - drop all locks
 * @ctx: the acquire context
 *
 * Drop all locks currently held against this acquire context.
 */
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
	if (WARN_ON(ctx->contended))
		__drm_stack_depot_print(ctx->stack_depot);

	while (!list_empty(&ctx->locked)) {
		struct drm_modeset_lock *lock;

		lock = list_first_entry(&ctx->locked,
				struct drm_modeset_lock, head);

		drm_modeset_unlock(lock);
	}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);

static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	if (WARN_ON(ctx->contended))
		__drm_stack_depot_print(ctx->stack_depot);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex, NULL))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
		ctx->stack_depot = __drm_stack_depot_save();
	}

	return ret;
}

/**
 * drm_modeset_backoff - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
 * you must call this function to drop all currently held locks and
 * block until the contended lock becomes available.
 *
 * This function returns 0 on success, or -ERESTARTSYS if this context
 * is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
 * wait has been interrupted.
 */
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_modeset_lock *contended = ctx->contended;

	ctx->contended = NULL;
	ctx->stack_depot = 0;

	if (WARN_ON(!contended))
		return 0;

	drm_modeset_drop_locks(ctx);

	return modeset_lock(contended, ctx, ctx->interruptible, true);
}
EXPORT_SYMBOL(drm_modeset_backoff);

/**
 * drm_modeset_lock_init - initialize lock
 * @lock: lock to init
 */
void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
	ww_mutex_init(&lock->mutex, &crtc_ww_class);
	INIT_LIST_HEAD(&lock->head);
	__drm_stack_depot_init();
}
EXPORT_SYMBOL(drm_modeset_lock_init);

/**
 * drm_modeset_lock - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * If @ctx is not NULL, then its ww acquire context is used and the
 * lock will be tracked by the context and can be released by calling
 * drm_modeset_drop_locks().  If -EDEADLK is returned, this means a
 * deadlock scenario has been detected and it is an error to attempt
 * to take any more locks without first calling drm_modeset_backoff().
 *
 * If the @ctx is not NULL and initialized with
 * %DRM_MODESET_ACQUIRE_INTERRUPTIBLE, this function will fail with
 * -ERESTARTSYS when interrupted.
 *
 * If @ctx is NULL then the function call behaves like a normal,
 * uninterruptible non-nesting mutex_lock() call.
 */
int drm_modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, ctx->interruptible, false);

	ww_mutex_lock(&lock->mutex, NULL);
	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);

/**
 * drm_modeset_lock_single_interruptible - take a single modeset lock
 * @lock: lock to take
 *
 * This function behaves as drm_modeset_lock() with a NULL context,
 * but performs interruptible waits.
 *
 * This function returns 0 on success, or -ERESTARTSYS when interrupted.
 */
int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
{
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);
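
/*
 * A minimal sketch of the single-lock path described in the kms locking DOC
 * above, with no acquire context; mydrv_poke_one_crtc() is a hypothetical
 * placeholder, while &drm_crtc.mutex is the real per-CRTC modeset lock::
 *
 *     int mydrv_poke_one_crtc(struct drm_crtc *crtc)
 *     {
 *         int ret;
 *
 *         // interruptible wait, no ww acquire context needed
 *         ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
 *         if (ret)
 *             return ret;	// -ERESTARTSYS if interrupted
 *
 *         // ... inspect or update state protected by crtc->mutex ...
 *
 *         drm_modeset_unlock(&crtc->mutex);
 *         return 0;
 *     }
 *
 * drm_modeset_lock(&crtc->mutex, NULL) would take the same lock with an
 * uninterruptible wait.
 */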

/**
 * drm_modeset_unlock - drop modeset lock
 * @lock: lock to release
 */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);

/**
 * drm_modeset_lock_all_ctx - take all modeset locks
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented.
 *
 * Unlike drm_modeset_lock_all(), it doesn't take the &drm_mode_config.mutex
 * since that lock isn't required for modeset state changes. Callers which
 * need to grab that lock too need to do so outside of the acquire context
 * @ctx.
 *
 * Locks acquired with this function should be released by calling the
 * drm_modeset_drop_locks() function on @ctx.
 *
 * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int drm_modeset_lock_all_ctx(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_private_obj *privobj;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	int ret;

	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_plane(plane, dev) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_privobj(privobj, dev) {
		ret = drm_modeset_lock(&privobj->lock, ctx);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
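
/*
 * A minimal sketch of driving drm_modeset_lock_all_ctx() by hand, for a
 * caller that also wants &drm_mode_config.mutex, which (as noted above)
 * must be taken outside the acquire context; mydrv_walk_all_modeset_state()
 * and the work done under the locks are hypothetical placeholders::
 *
 *     int mydrv_walk_all_modeset_state(struct drm_device *dev)
 *     {
 *         struct drm_modeset_acquire_ctx ctx;
 *         int ret;
 *
 *         mutex_lock(&dev->mode_config.mutex);
 *         drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
 *     retry:
 *         ret = drm_modeset_lock_all_ctx(dev, &ctx);
 *         if (ret == -EDEADLK) {
 *             ret = drm_modeset_backoff(&ctx);
 *             if (!ret)
 *                 goto retry;
 *         }
 *         if (!ret) {
 *             // ... all crtc, plane and private-object locks are held ...
 *         }
 *
 *         drm_modeset_drop_locks(&ctx);
 *         drm_modeset_acquire_fini(&ctx);
 *         mutex_unlock(&dev->mode_config.mutex);
 *         return ret;
 *     }
 */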