xref: /openbsd/sys/dev/pci/drm/i915/intel_wakeref.h (revision f005ef32)
1c349dbc7Sjsg /*
2c349dbc7Sjsg  * SPDX-License-Identifier: MIT
3c349dbc7Sjsg  *
4c349dbc7Sjsg  * Copyright © 2019 Intel Corporation
5c349dbc7Sjsg  */
6c349dbc7Sjsg 
7c349dbc7Sjsg #ifndef INTEL_WAKEREF_H
8c349dbc7Sjsg #define INTEL_WAKEREF_H
9c349dbc7Sjsg 
10c349dbc7Sjsg #include <linux/atomic.h>
11ad8b1aafSjsg #include <linux/bitfield.h>
12c349dbc7Sjsg #include <linux/bits.h>
13c349dbc7Sjsg #include <linux/lockdep.h>
14c349dbc7Sjsg #include <linux/mutex.h>
15c349dbc7Sjsg #include <linux/refcount.h>
16c349dbc7Sjsg #include <linux/stackdepot.h>
17c349dbc7Sjsg #include <linux/timer.h>
18c349dbc7Sjsg #include <linux/workqueue.h>
19c349dbc7Sjsg 
20c349dbc7Sjsg #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
21c349dbc7Sjsg #define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
22c349dbc7Sjsg #else
23c349dbc7Sjsg #define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
24c349dbc7Sjsg #endif
25c349dbc7Sjsg 
26c349dbc7Sjsg struct intel_runtime_pm;
27c349dbc7Sjsg struct intel_wakeref;
28c349dbc7Sjsg 
29c349dbc7Sjsg typedef depot_stack_handle_t intel_wakeref_t;
30c349dbc7Sjsg 
/*
 * Callbacks invoked on the 0 <-> 1 reference transitions, always called
 * underneath the wakeref mutex (see intel_wakeref_get()/__intel_wakeref_put()
 * kernel-doc below). Both may fail: a failed get() unwinds the acquisition,
 * a failed put() retains the runtime-pm wakeref.
 */
struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);	/* first reference taken */
	int (*put)(struct intel_wakeref *wf);	/* last reference dropped */
};
35c349dbc7Sjsg 
struct intel_wakeref {
	/* Number of active holds; 0 <-> 1 transitions run ops->get/put. */
	atomic_t count;
	/* Serialises the 0 <-> 1 transitions (OpenBSD rwlock backs the
	 * Linux mutex API in this port). */
	struct rwlock mutex;

	/* Handle for the underlying runtime-pm wakeref; non-zero while held. */
	intel_wakeref_t wakeref;

/* OpenBSD port: the driver softc stands in for drm_i915_private. */
#define drm_i915_private inteldrm_softc
	struct drm_i915_private *i915;
	const struct intel_wakeref_ops *ops;

	/* Deferred release, used by the ASYNC/DELAY put flags below. */
	struct delayed_work work;
};
48c349dbc7Sjsg 
/*
 * Per-callsite lockdep class keys, one static instance per
 * intel_wakeref_init() expansion so each wakeref gets its own
 * lockdep class for the mutex and the delayed work.
 */
struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};
53c349dbc7Sjsg 
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct drm_i915_private *i915,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);
/*
 * Initialise @wf; the static __key gives this callsite its own
 * lockdep classes (see struct intel_wakeref_lockclass).
 */
#define intel_wakeref_init(wf, i915, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (i915), (ops), &__key);		\
} while (0)
63c349dbc7Sjsg 
64c349dbc7Sjsg int __intel_wakeref_get_first(struct intel_wakeref *wf);
65c349dbc7Sjsg void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
66c349dbc7Sjsg 
67c349dbc7Sjsg /**
68c349dbc7Sjsg  * intel_wakeref_get: Acquire the wakeref
69c349dbc7Sjsg  * @wf: the wakeref
70c349dbc7Sjsg  *
71c349dbc7Sjsg  * Acquire a hold on the wakeref. The first user to do so, will acquire
72*f005ef32Sjsg  * the runtime pm wakeref and then call the intel_wakeref_ops->get()
73*f005ef32Sjsg  * underneath the wakeref mutex.
74c349dbc7Sjsg  *
75*f005ef32Sjsg  * Note that intel_wakeref_ops->get() is allowed to fail, in which case
76*f005ef32Sjsg  * the runtime-pm wakeref will be released and the acquisition unwound,
77*f005ef32Sjsg  * and an error reported.
78c349dbc7Sjsg  *
79c349dbc7Sjsg  * Returns: 0 if the wakeref was acquired successfully, or a negative error
80c349dbc7Sjsg  * code otherwise.
81c349dbc7Sjsg  */
82c349dbc7Sjsg static inline int
intel_wakeref_get(struct intel_wakeref * wf)83c349dbc7Sjsg intel_wakeref_get(struct intel_wakeref *wf)
84c349dbc7Sjsg {
85c349dbc7Sjsg 	might_sleep();
86c349dbc7Sjsg 	if (unlikely(!atomic_inc_not_zero(&wf->count)))
87c349dbc7Sjsg 		return __intel_wakeref_get_first(wf);
88c349dbc7Sjsg 
89c349dbc7Sjsg 	return 0;
90c349dbc7Sjsg }
91c349dbc7Sjsg 
/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter, only valid if it is already held by
 * the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	/* Caller must already hold a reference; a 0 -> 1 transition here
	 * would skip ops->get(), hence the debug check. */
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}
107c349dbc7Sjsg 
/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active (count > 0); never triggers the ops->get() slow path.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
122c349dbc7Sjsg 
/*
 * Bit layout of the 'flags' argument to __intel_wakeref_put():
 * bit 0 requests asynchronous release; all remaining upper bits
 * encode the delay (see INTEL_WAKEREF_PUT_DELAY / GENMASK below).
 */
enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};
127ad8b1aafSjsg 
/* Lockdep annotation: assert that acquiring @wf may take its mutex. */
static inline void
intel_wakeref_might_get(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}
1331bb76ff1Sjsg 
/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the intel_wakeref_ops->put()
 * callback is called underneath the wakeref mutex.
 *
 * Note that intel_wakeref_ops->put() is allowed to fail, in which case the
 * runtime-pm wakeref is retained.
 *
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
/* These #defines sit between signature and body on purpose (upstream
 * quirk); the preprocessor strips them before the parser sees the
 * function, so this is legal C. */
#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	/* atomic_add_unless(-1, 1) decrements unless count == 1, so only
	 * the final reference falls through to the ops->put() slow path. */
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}
157c349dbc7Sjsg 
/* Synchronous release; may sleep when dropping the last reference. */
static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}
164c349dbc7Sjsg 
/* Release from atomic context: the final put is punted to wf->work. */
static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}
170c349dbc7Sjsg 
/* Asynchronous release with @delay packed into the upper flag bits
 * (units per the __intel_wakeref_put_last() implementation —
 * presumably jiffies; confirm against intel_wakeref.c). */
static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}
178ad8b1aafSjsg 
/* Lockdep annotation: assert that releasing @wf may take its mutex. */
static inline void
intel_wakeref_might_put(struct intel_wakeref *wf)
{
	might_lock(&wf->mutex);
}
1841bb76ff1Sjsg 
/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}
199c349dbc7Sjsg 
/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}
212c349dbc7Sjsg 
/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits for the active callback (under the @wf->mutex or another CPU) is
 * complete.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	/* Empty lock/unlock pair acts as a barrier: returns only after any
	 * callback currently running under the mutex has finished. */
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	/* Also drain any pending asynchronous/delayed put. */
	flush_delayed_work(&wf->work);
}
227c349dbc7Sjsg 
/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Unlocked snapshot of wf->wakeref; may be stale by the time the
 * caller acts on it.
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}
239c349dbc7Sjsg 
/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 *
 * Must be called with wf->mutex held and the count already at zero;
 * reinstates a single reference (with release ordering) so the
 * in-progress park is deferred until that reference is put again.
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}
251c349dbc7Sjsg 
252c349dbc7Sjsg /**
253c349dbc7Sjsg  * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
254c349dbc7Sjsg  * @wf: the wakeref
255c349dbc7Sjsg  *
256c349dbc7Sjsg  * Wait for the earlier asynchronous release of the wakeref. Note
257c349dbc7Sjsg  * this will wait for any third party as well, so make sure you only wait
258c349dbc7Sjsg  * when you have control over the wakeref and trust no one else is acquiring
259c349dbc7Sjsg  * it.
260c349dbc7Sjsg  *
261c349dbc7Sjsg  * Return: 0 on success, error code if killed.
262c349dbc7Sjsg  */
263c349dbc7Sjsg int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
264c349dbc7Sjsg 
/*
 * Timer-based autosuspend extension: holds a runtime-pm wakeref until
 * the timer fires (see intel_wakeref_auto() kernel-doc below).
 */
struct intel_wakeref_auto {
	struct drm_i915_private *i915;
	/* OpenBSD timeout stands in for the Linux timer in this port. */
	struct timeout timer;
	/* Runtime-pm wakeref held while the timer is pending. */
	intel_wakeref_t wakeref;
	spinlock_t lock;	/* protects wakeref/timer arming */
	refcount_t count;	/* outstanding auto users */
};
272c349dbc7Sjsg 
273c349dbc7Sjsg /**
274c349dbc7Sjsg  * intel_wakeref_auto: Delay the runtime-pm autosuspend
275c349dbc7Sjsg  * @wf: the wakeref
276c349dbc7Sjsg  * @timeout: relative timeout in jiffies
277c349dbc7Sjsg  *
278c349dbc7Sjsg  * The runtime-pm core uses a suspend delay after the last wakeref
279c349dbc7Sjsg  * is released before triggering runtime suspend of the device. That
280c349dbc7Sjsg  * delay is configurable via sysfs with little regard to the device
281c349dbc7Sjsg  * characteristics. Instead, we want to tune the autosuspend based on our
282c349dbc7Sjsg  * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
283c349dbc7Sjsg  * timeout.
284c349dbc7Sjsg  *
285c349dbc7Sjsg  * Pass @timeout = 0 to cancel a previous autosuspend by executing the
286c349dbc7Sjsg  * suspend immediately.
287c349dbc7Sjsg  */
288c349dbc7Sjsg void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);
289c349dbc7Sjsg 
290c349dbc7Sjsg void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
291*f005ef32Sjsg 			     struct drm_i915_private *i915);
292c349dbc7Sjsg void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
293c349dbc7Sjsg 
294c349dbc7Sjsg #endif /* INTEL_WAKEREF_H */
295