/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H

#include "i915_drv.h"
#include "i915_request.h"
#include "intel_engine_types.h"
#include "intel_wakeref.h"
#include "intel_gt_pm.h"

15 static inline bool
intel_engine_pm_is_awake(const struct intel_engine_cs * engine)16 intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
17 {
18 	return intel_wakeref_is_active(&engine->wakeref);
19 }
20 
__intel_engine_pm_get(struct intel_engine_cs * engine)21 static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
22 {
23 	__intel_wakeref_get(&engine->wakeref);
24 }
25 
intel_engine_pm_get(struct intel_engine_cs * engine)26 static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
27 {
28 	intel_wakeref_get(&engine->wakeref);
29 }
30 
intel_engine_pm_get_if_awake(struct intel_engine_cs * engine)31 static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
32 {
33 	return intel_wakeref_get_if_active(&engine->wakeref);
34 }
35 
intel_engine_pm_might_get(struct intel_engine_cs * engine)36 static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
37 {
38 	if (!intel_engine_is_virtual(engine)) {
39 		intel_wakeref_might_get(&engine->wakeref);
40 	} else {
41 		struct intel_gt *gt = engine->gt;
42 		struct intel_engine_cs *tengine;
43 		intel_engine_mask_t tmp, mask = engine->mask;
44 
45 		for_each_engine_masked(tengine, gt, mask, tmp)
46 			intel_wakeref_might_get(&tengine->wakeref);
47 	}
48 	intel_gt_pm_might_get(engine->gt);
49 }
50 
intel_engine_pm_put(struct intel_engine_cs * engine)51 static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
52 {
53 	intel_wakeref_put(&engine->wakeref);
54 }
55 
intel_engine_pm_put_async(struct intel_engine_cs * engine)56 static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
57 {
58 	intel_wakeref_put_async(&engine->wakeref);
59 }
60 
intel_engine_pm_put_delay(struct intel_engine_cs * engine,unsigned long delay)61 static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
62 					     unsigned long delay)
63 {
64 	intel_wakeref_put_delay(&engine->wakeref, delay);
65 }
66 
intel_engine_pm_flush(struct intel_engine_cs * engine)67 static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
68 {
69 	intel_wakeref_unlock_wait(&engine->wakeref);
70 }
71 
intel_engine_pm_might_put(struct intel_engine_cs * engine)72 static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
73 {
74 	if (!intel_engine_is_virtual(engine)) {
75 		intel_wakeref_might_put(&engine->wakeref);
76 	} else {
77 		struct intel_gt *gt = engine->gt;
78 		struct intel_engine_cs *tengine;
79 		intel_engine_mask_t tmp, mask = engine->mask;
80 
81 		for_each_engine_masked(tengine, gt, mask, tmp)
82 			intel_wakeref_might_put(&tengine->wakeref);
83 	}
84 	intel_gt_pm_might_put(engine->gt);
85 }
86 
87 static inline struct i915_request *
intel_engine_create_kernel_request(struct intel_engine_cs * engine)88 intel_engine_create_kernel_request(struct intel_engine_cs *engine)
89 {
90 	struct i915_request *rq;
91 
92 	/*
93 	 * The engine->kernel_context is special as it is used inside
94 	 * the engine-pm barrier (see __engine_park()), circumventing
95 	 * the usual mutexes and relying on the engine-pm barrier
96 	 * instead. So whenever we use the engine->kernel_context
97 	 * outside of the barrier, we must manually handle the
98 	 * engine wakeref to serialise with the use inside.
99 	 */
100 	intel_engine_pm_get(engine);
101 	rq = i915_request_create(engine->kernel_context);
102 	intel_engine_pm_put(engine);
103 
104 	return rq;
105 }
106 
void intel_engine_init__pm(struct intel_engine_cs *engine);

void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine);

#endif /* INTEL_ENGINE_PM_H */