xref: /dragonfly/sys/dev/drm/i915/intel_runtime_pm.c (revision 0d27ae55)
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
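
/*
 * A minimal sketch of the reference counting pattern this file exports
 * (see intel_display_power_get()/intel_display_power_put() below); the
 * POWER_DOMAIN_AUDIO domain is just an arbitrary example:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
 *	... access the audio hardware block ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
 *
 * Every get must be balanced by a put; the underlying wells are only
 * powered down again once the last reference to their domains is dropped.
 */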

#define GEN9_ENABLE_DC5(dev) 0
#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		if ((power_well)->domains & (domain_mask))
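
/*
 * A minimal usage sketch for the iterators above: the domain_mask argument
 * filters the walk, so something like
 *
 *	for_each_power_well(i, power_well, BIT(POWER_DOMAIN_VGA), power_domains)
 *		power_well->ops->enable(dev_priv, power_well);
 *
 * would visit only the wells whose ->domains mask includes the VGA domain,
 * in enabling (low to high index) order; the _rev variant walks the same
 * set in the reverse, disabling order.
 */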

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
				    int power_well_id);

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
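
/*
 * A minimal sketch of the readout pattern described above, assuming the
 * relevant modeset locks are already held (PIPECONF/PIPE_A are just an
 * illustrative register/domain pair):
 *
 *	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		return false;
 *	tmp = I915_READ(PIPECONF(PIPE_A));
 *	...
 */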

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled, runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
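
/*
 * A minimal sketch of the expected call pattern; the real call sites live
 * in the driver load/unload and suspend/resume paths:
 *
 *	intel_display_set_init_power(dev_priv, true);	<- load/resume
 *	... driver fully initialized, real domain references taken ...
 *	intel_display_set_init_power(dev_priv, false);	<- runtime pm may kick in
 */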

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
#if 0
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}

	if (power_well->data == SKL_DISP_PW_1) {
		intel_prepare_ddi(dev);
		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
	}
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |                       \
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val |= DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_DC9;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void gen9_set_dc_state_debugmask_memory_up(
			struct drm_i915_private *dev_priv)
{
	uint32_t val;

	/* The bit below never needs to be cleared afterwards */
	val = I915_READ(DC_STATE_DEBUG);
	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
		I915_WRITE(DC_STATE_DEBUG, val);
		POSTING_READ(DC_STATE_DEBUG);
	}
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
	WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
				"DC5 already programmed to be enabled.\n");
	WARN(dev_priv->pm.suspended,
		"DC5 cannot be enabled if the platform is runtime-suspended.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	WARN(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
	WARN(dev_priv->pm.suspended,
		"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc5(dev_priv);

	DRM_DEBUG_KMS("Disabling DC5\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
	WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		"Backlight is not disabled.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		"DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	assert_csr_loaded(dev_priv);
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		"DC6 already programmed to be disabled.\n");
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
	val |= DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	assert_can_disable_dc6(dev_priv);

	DRM_DEBUG_KMS("Disabling DC6\n");

	val = I915_READ(DC_STATE_EN);
	val &= ~DC_STATE_EN_UPTO_DC6;
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Power well status is enabled with no enable request from the driver or BIOS!\n");
			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				if (SKL_ENABLE_DC6(dev)) {
					skl_disable_dc6(dev_priv);
					/*
					 * DDI buffer programming is unnecessary
					 * during driver load/resume, as it is
					 * already done as part of modeset
					 * initialization. It is also invalid
					 * here, since the encoder list is
					 * still uninitialized.
					 */
					if (!dev_priv->power_domains.initializing)
						intel_prepare_ddi(dev);
				} else {
					gen9_disable_dc5(dev_priv);
				}
			}
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER,	tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);

			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
				power_well->data == SKL_DISP_PW_2) {
				enum csr_state state;
				/* TODO: wait for a completion event or
				 * similar here instead of busy
				 * waiting using wait_for function.
				 */
				wait_for((state = intel_csr_load_status_get(dev_priv)) !=
						FW_UNINITIALIZED, 1000);
				if (state != FW_LOADED)
					DRM_ERROR("CSR firmware not ready (%d)\n",
							state);
				else
					if (SKL_ENABLE_DC6(dev))
						skl_enable_dc6(dev_priv);
					else
						gen9_enable_dc5(dev_priv);
			}
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

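	/*
	 * The Punit is expected to update PUNIT_REG_PWRGT_STATUS once a
	 * requested power state transition has actually completed, so the
	 * COND check below is busy-waited on (via wait_for()) after writing
	 * the request to PUNIT_REG_PWRGT_CTRL.
	 */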
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	valleyview_enable_display_irqs(dev_priv);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be initialized explicitly anyway.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	valleyview_disable_display_irqs(dev_priv);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	vlv_set_power_well(dev_priv, power_well, false);

	vlv_power_sequencer_reset(dev_priv);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV);
		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	} else {
		phy = DPIO_PHY1;
		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	}
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum i915_pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum i915_pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	if (power_well->data == PIPE_A) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		valleyview_enable_display_irqs(dev_priv);
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		/*
		 * During driver initialization/resume we can avoid restoring the
		 * part of the HW/SW state that will be initialized explicitly anyway.
		 */
		if (dev_priv->power_domains.initializing)
			return;

		intel_hpd_init(dev_priv);

		i915_redisable_vga_power_on(dev_priv->dev);
	}
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A &&
		     power_well->data != PIPE_B &&
		     power_well->data != PIPE_C);

	if (power_well->data == PIPE_A) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		valleyview_disable_display_irqs(dev_priv);
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	}

	chv_set_pipe_power_well(dev_priv, power_well, false);

	if (power_well->data == PIPE_A)
		vlv_power_sequencer_reset(dev_priv);
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
				    int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};

static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	}
};

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	lockinit(&power_domains->lock, "i915pl", 0, LK_CANRECURSE);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv->dev)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv->dev)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
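
/*
 * A minimal sketch of the expected bring-up order, assuming the standard
 * driver load path: set up the structures first, then sync with the actual
 * hardware state:
 *
 *	intel_power_domains_init(dev_priv);
 *	intel_power_domains_init_hw(dev_priv);
 */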

static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
#if 0
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
#endif
}

/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(dev_priv);

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev_priv, true);
}

static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state to reconstruct the expected initial
	 * value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY1, DPIO_CH0);
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc))
		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
	if (cmn_d->ops->is_enabled(dev_priv, cmn_d))
		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

1658 	/* If the display might already be active, skip this */
1659 	if (cmn->ops->is_enabled(dev_priv, cmn) &&
1660 	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
1661 	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
1662 		return;
1663 
1664 	DRM_DEBUG_KMS("toggling display PHY side reset\n");
1665 
1666 	/* cmnlane needs DPLL registers */
1667 	disp2d->ops->enable(dev_priv, disp2d);
1668 
1669 	/*
1670 	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1671 	 * Need to assert and de-assert PHY SB reset by gating the
1672 	 * common lane power, then ungating it.
1673 	 * Simply ungating isn't enough to reset the PHY sufficiently to
1674 	 * get the ports and lanes running.
1675 	 */
1676 	cmn->ops->disable(dev_priv, cmn);
1677 }
1678 
1679 /**
1680  * intel_power_domains_init_hw - initialize hardware power domain state
1681  * @dev_priv: i915 device instance
1682  *
1683  * This function initializes the hardware power domain state and enables all
1684  * power domains using intel_display_set_init_power().
1685  */
1686 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
1687 {
1688 	struct drm_device *dev = dev_priv->dev;
1689 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1690 
1691 	power_domains->initializing = true;
1692 
1693 	if (IS_CHERRYVIEW(dev)) {
1694 		chv_phy_control_init(dev_priv);
1695 	} else if (IS_VALLEYVIEW(dev)) {
1696 		mutex_lock(&power_domains->lock);
1697 		vlv_cmnlane_wa(dev_priv);
1698 		mutex_unlock(&power_domains->lock);
1699 	}
1700 
1701 	/* For now, we need the power well to be always enabled. */
1702 	intel_display_set_init_power(dev_priv, true);
1703 	intel_power_domains_resume(dev_priv);
1704 	power_domains->initializing = false;
1705 }
1706 
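/*
 * Illustrative load-time ordering (a sketch, not the real init code): the
 * power domain structures are set up first, the hardware state is synced
 * next, and runtime pm is only enabled at the very end of driver load.
 */
#if 0
	intel_power_domains_init(dev_priv);	/* build the power well list */
	intel_power_domains_init_hw(dev_priv);	/* sync and enable hw state */
	/* ... remainder of driver load ... */
	intel_runtime_pm_enable(dev_priv);	/* last step of driver load */
#endif
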
1707 /**
1708  * intel_aux_display_runtime_get - grab an auxiliary power domain reference
1709  * @dev_priv: i915 device instance
1710  *
1711  * This function grabs a power domain reference for the auxiliary power domain
1712  * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
1713  * parents are powered up. Therefore users should only grab a reference to the
1714  * innermost power domain they need.
1715  *
1716  * Any power domain reference obtained by this function must have a symmetric
1717  * call to intel_aux_display_runtime_put() to release the reference again.
1718  */
1719 void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
1720 {
1721 	intel_runtime_pm_get(dev_priv);
1722 }
1723 
1724 /**
1725  * intel_aux_display_runtime_put - release an auxiliary power domain reference
1726  * @dev_priv: i915 device instance
1727  *
1728  * This function drops the auxiliary power domain reference obtained by
1729  * intel_aux_display_runtime_get() and might power down the corresponding
1730  * hardware block right away if this is the last reference.
1731  */
1732 void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
1733 {
1734 	intel_runtime_pm_put(dev_priv);
1735 }
1736 
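/*
 * Illustrative usage (hypothetical caller): bracket any GMBUS or DP AUX
 * access in a symmetric get/put pair so the blocks stay powered for the
 * duration of the transfer.
 */
#if 0
	intel_aux_display_runtime_get(dev_priv);
	/* ... perform the GMBUS / DP AUX transfer ... */
	intel_aux_display_runtime_put(dev_priv);
#endif
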
1737 /**
1738  * intel_runtime_pm_get - grab a runtime pm reference
1739  * @dev_priv: i915 device instance
1740  *
1741  * This function grabs a device-level runtime pm reference (mostly used for GEM
1742  * code to ensure the GTT or GT is on) and ensures that the device is powered up.
1743  *
1744  * Any runtime pm reference obtained by this function must have a symmetric
1745  * call to intel_runtime_pm_put() to release the reference again.
1746  */
1747 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
1748 {
1749 	struct drm_device *dev = dev_priv->dev;
1750 #if 0
1751 	struct device *device = &dev->pdev->dev;
1752 #endif
1753 
1754 	if (!HAS_RUNTIME_PM(dev))
1755 		return;
1756 
1757 #if 0
1758 	pm_runtime_get_sync(device);
1759 #endif
1760 	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
1761 }
1762 
1763 /**
1764  * intel_runtime_pm_get_noresume - grab a runtime pm reference
1765  * @dev_priv: i915 device instance
1766  *
1767  * This function grabs a device-level runtime pm reference (mostly used for GEM
1768  * code to ensure the GTT or GT is on).
1769  *
1770  * It will _not_ power up the device but instead only check that it's powered
1771  * on.  Therefore it is only valid to call this function from contexts where
1772  * the device is known to be powered up and where trying to power it up would
1773  * result in hilarity and deadlocks. That pretty much means only the system
1774  * suspend/resume code, where this is used to grab runtime pm references for
1775  * setup that is delayed into work items.
1776  *
1777  * Any runtime pm reference obtained by this function must have a symmetric
1778  * call to intel_runtime_pm_put() to release the reference again.
1779  */
1780 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
1781 {
1782 	struct drm_device *dev = dev_priv->dev;
1783 #if 0
1784 	struct device *device = &dev->pdev->dev;
1785 #endif
1786 
1787 	if (!HAS_RUNTIME_PM(dev))
1788 		return;
1789 
1790 	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
1791 #if 0
1792 	pm_runtime_get_noresume(device);
1793 #endif
1794 }
1795 
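/*
 * Illustrative sketch (the work item and the example_setup_work field are
 * hypothetical): the intended pattern is to take a no-resume reference from
 * a context where the device is known to be awake, hand it to a work item,
 * and release it there with intel_runtime_pm_put() once the delayed setup
 * is done.
 */
#if 0
static void example_setup_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, example_setup_work);

	/* ... delayed hardware setup ... */

	intel_runtime_pm_put(dev_priv);
}

static void example_system_resume(struct drm_i915_private *dev_priv)
{
	/* Device is awake here, so only pin it, don't try to wake it. */
	intel_runtime_pm_get_noresume(dev_priv);
	schedule_work(&dev_priv->example_setup_work);
}
#endif
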
1796 /**
1797  * intel_runtime_pm_put - release a runtime pm reference
1798  * @dev_priv: i915 device instance
1799  *
1800  * This function drops the device-level runtime pm reference obtained by
1801  * intel_runtime_pm_get() and might power down the corresponding
1802  * hardware block right away if this is the last reference.
1803  */
1804 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
1805 {
1806 #if 0
1807 	struct drm_device *dev = dev_priv->dev;
1808 	struct device *device = &dev->pdev->dev;
1809 
1810 	if (!HAS_RUNTIME_PM(dev))
1811 		return;
1812 
1813 	pm_runtime_mark_last_busy(device);
1814 	pm_runtime_put_autosuspend(device);
1815 #endif
1816 }
1817 
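/*
 * Illustrative usage (hypothetical caller): a GEM path that needs the GTT
 * powered takes a symmetric reference around its hardware access.
 */
#if 0
	intel_runtime_pm_get(dev_priv);
	/* ... access the GTT or other GT hardware ... */
	intel_runtime_pm_put(dev_priv);
#endif
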
1818 /**
1819  * intel_runtime_pm_enable - enable runtime pm
1820  * @dev_priv: i915 device instance
1821  *
1822  * This function enables runtime pm at the end of the driver load sequence.
1823  *
1824  * Note that this function currently does not enable runtime pm for the
1825  * subordinate display power domains. That is only done on the first modeset
1826  * using intel_display_set_init_power().
1827  */
1828 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
1829 {
1830 	struct drm_device *dev = dev_priv->dev;
1831 #if 0
1832 	struct device *device = &dev->pdev->dev;
1833 #endif
1834 
1835 	if (!HAS_RUNTIME_PM(dev))
1836 		return;
1837 
1838 #if 0
1839 	pm_runtime_set_active(device);
1840 #endif
1841 
1842 	/*
1843 	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
1844 	 * requirement.
1845 	 */
1846 	if (!intel_enable_rc6(dev)) {
1847 		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
1848 		return;
1849 	}
1850 
1851 #if 0
1852 	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
1853 	pm_runtime_mark_last_busy(device);
1854 	pm_runtime_use_autosuspend(device);
1855 
1856 	pm_runtime_put_autosuspend(device);
1857 #endif
1858 }
1859 
1860