// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_panel.h"

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates dynamically,
 * based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking.  When
 * there is a disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR).  When there is
 * no movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_drrs_invalidate()
 * and intel_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
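
/*
 * Rough call pattern (an illustrative sketch only, not the actual call
 * sites, which live in the frontbuffer tracking code; "i915" and
 * "frontbuffer_bits" below stand for the caller's device pointer and
 * plane tracking bits):
 *
 *	// screen becomes busy: force the high refresh rate
 *	intel_drrs_invalidate(i915, frontbuffer_bits);
 *
 *	// rendering/flip completed: upclock and rearm the downclock timer
 *	intel_drrs_flush(i915, frontbuffer_bits);
 */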

const char *intel_drrs_type_str(enum drrs_type drrs_type)
{
	static const char * const str[] = {
		[DRRS_TYPE_NONE] = "none",
		[DRRS_TYPE_STATIC] = "static",
		[DRRS_TYPE_SEAMLESS] = "seamless",
	};

	if (drrs_type >= ARRAY_SIZE(str))
		return "<invalid>";

	return str[drrs_type];
}

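/*
 * Pick the refresh rate by toggling the "alternate refresh rate" bit in
 * PIPECONF; the bit position differs between VLV/CHV and ILK+ style
 * transcoders.
 */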
static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
				     enum drrs_refresh_rate refresh_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
	u32 val, bit;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		bit = PIPECONF_REFRESH_RATE_ALT_VLV;
	else
		bit = PIPECONF_REFRESH_RATE_ALT_ILK;

	val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if (refresh_rate == DRRS_REFRESH_RATE_LOW)
		val |= bit;
	else
		val &= ~bit;

	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
}

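/*
 * Pick the refresh rate by reprogramming the transcoder M1/N1 values,
 * using the precomputed M2/N2 set when the low refresh rate is requested.
 */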
static void
intel_drrs_set_refresh_rate_m_n(struct intel_crtc *crtc,
				enum drrs_refresh_rate refresh_rate)
{
	intel_cpu_transcoder_set_m1_n1(crtc, crtc->drrs.cpu_transcoder,
				       refresh_rate == DRRS_REFRESH_RATE_LOW ?
				       &crtc->drrs.m2_n2 : &crtc->drrs.m_n);
}

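/* DRRS is considered active on a crtc while a CPU transcoder is assigned. */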
bool intel_drrs_is_active(struct intel_crtc *crtc)
{
	return crtc->drrs.cpu_transcoder != INVALID_TRANSCODER;
}

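/*
 * Switch to the requested refresh rate: transcoders with a second set of
 * M2/N2 registers flip via the PIPECONF bit, others get their M1/N1 values
 * rewritten directly.
 */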
static void intel_drrs_set_state(struct intel_crtc *crtc,
				 enum drrs_refresh_rate refresh_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (refresh_rate == crtc->drrs.refresh_rate)
		return;

	if (intel_cpu_transcoder_has_m2_n2(dev_priv, crtc->drrs.cpu_transcoder))
		intel_drrs_set_refresh_rate_pipeconf(crtc, refresh_rate);
	else
		intel_drrs_set_refresh_rate_m_n(crtc, refresh_rate);

	crtc->drrs.refresh_rate = refresh_rate;
}

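/* (Re)arm the 1 second idleness timeout before downclocking. */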
static void intel_drrs_schedule_work(struct intel_crtc *crtc)
{
	mod_delayed_work(system_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
}

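/*
 * All frontbuffer bits this crtc cares about: its own pipe plus any pipes
 * ganged with it via bigjoiner.
 */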
static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	unsigned int frontbuffer_bits;

	frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
					 crtc_state->bigjoiner_pipes)
		frontbuffer_bits |= INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);

	return frontbuffer_bits;
}

/**
 * intel_drrs_activate - activate DRRS
 * @crtc_state: the crtc state
 *
 * Activates DRRS on the crtc.
 */
void intel_drrs_activate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->has_drrs)
		return;

	if (!crtc_state->hw.active)
		return;

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return;

	mutex_lock(&crtc->drrs.mutex);

	crtc->drrs.cpu_transcoder = crtc_state->cpu_transcoder;
	crtc->drrs.m_n = crtc_state->dp_m_n;
	crtc->drrs.m2_n2 = crtc_state->dp_m2_n2;
	crtc->drrs.frontbuffer_bits = intel_drrs_frontbuffer_bits(crtc_state);
	crtc->drrs.busy_frontbuffer_bits = 0;

	intel_drrs_schedule_work(crtc);

	mutex_unlock(&crtc->drrs.mutex);
}

/**
 * intel_drrs_deactivate - deactivate DRRS
 * @old_crtc_state: the old crtc state
 *
 * Deactivates DRRS on the crtc.
 */
void intel_drrs_deactivate(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);

	if (!old_crtc_state->has_drrs)
		return;

	if (!old_crtc_state->hw.active)
		return;

	if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
		return;

	mutex_lock(&crtc->drrs.mutex);

	if (intel_drrs_is_active(crtc))
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);

	crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
	crtc->drrs.frontbuffer_bits = 0;
	crtc->drrs.busy_frontbuffer_bits = 0;

	mutex_unlock(&crtc->drrs.mutex);

	cancel_delayed_work_sync(&crtc->drrs.work);
}

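/*
 * Delayed work: switch to the low refresh rate if DRRS is still active and
 * none of the tracked frontbuffers are busy.
 */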
static void intel_drrs_downclock_work(struct work_struct *work)
{
	struct intel_crtc *crtc = container_of(work, typeof(*crtc), drrs.work.work);

	mutex_lock(&crtc->drrs.mutex);

	if (intel_drrs_is_active(crtc) && !crtc->drrs.busy_frontbuffer_bits)
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_LOW);

	mutex_unlock(&crtc->drrs.mutex);
}

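/*
 * Common invalidate/flush handler: update each crtc's busy frontbuffer
 * bits, bump to the high refresh rate, and rearm or cancel the downclock
 * work as appropriate.
 */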
static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
					  unsigned int all_frontbuffer_bits,
					  bool invalidate)
{
	struct intel_crtc *crtc;

	if (dev_priv->vbt.drrs_type != DRRS_TYPE_SEAMLESS)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		unsigned int frontbuffer_bits;

		mutex_lock(&crtc->drrs.mutex);

		frontbuffer_bits = all_frontbuffer_bits & crtc->drrs.frontbuffer_bits;
		if (!frontbuffer_bits) {
			mutex_unlock(&crtc->drrs.mutex);
			continue;
		}

		if (invalidate)
			crtc->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
		else
			crtc->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

		/* flush/invalidate means busy screen hence upclock */
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);

		/*
		 * flush also means no more activity hence schedule downclock, if all
		 * other fbs are quiescent too
		 */
		if (!crtc->drrs.busy_frontbuffer_bits)
			intel_drrs_schedule_work(crtc);
		else
			cancel_delayed_work(&crtc->drrs.work);

		mutex_unlock(&crtc->drrs.mutex);
	}
}

/**
 * intel_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
			   unsigned int frontbuffer_bits)
{
	intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true);
}

/**
 * intel_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or a flip on a crtc has completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR), and idleness detection should be restarted if no
 * other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_drrs_flush(struct drm_i915_private *dev_priv,
		      unsigned int frontbuffer_bits)
{
	intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
}

/**
 * intel_crtc_drrs_init - Init DRRS for CRTC
 * @crtc: crtc
 *
 * This function is called only once at driver load to initialize the basic
 * DRRS state for the crtc.
 */
void intel_crtc_drrs_init(struct intel_crtc *crtc)
{
	INIT_DELAYED_WORK(&crtc->drrs.work, intel_drrs_downclock_work);
	mutex_init(&crtc->drrs.mutex);
	crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
}