1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "display/intel_crt.h"
7 #include "display/intel_dp.h"
8 
9 #include "i915_drv.h"
10 #include "i915_irq.h"
11 #include "intel_cdclk.h"
12 #include "intel_combo_phy.h"
13 #include "intel_csr.h"
14 #include "intel_display_power.h"
15 #include "intel_display_types.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_hotplug.h"
18 #include "intel_pm.h"
19 #include "intel_sideband.h"
20 #include "intel_tc.h"
21 #include "intel_vga.h"
22 
23 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
24 					 enum i915_power_well_id power_well_id);
25 
26 const char *
27 intel_display_power_domain_str(enum intel_display_power_domain domain)
28 {
29 	switch (domain) {
30 	case POWER_DOMAIN_DISPLAY_CORE:
31 		return "DISPLAY_CORE";
32 	case POWER_DOMAIN_PIPE_A:
33 		return "PIPE_A";
34 	case POWER_DOMAIN_PIPE_B:
35 		return "PIPE_B";
36 	case POWER_DOMAIN_PIPE_C:
37 		return "PIPE_C";
38 	case POWER_DOMAIN_PIPE_D:
39 		return "PIPE_D";
40 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
41 		return "PIPE_A_PANEL_FITTER";
42 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
43 		return "PIPE_B_PANEL_FITTER";
44 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
45 		return "PIPE_C_PANEL_FITTER";
46 	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
47 		return "PIPE_D_PANEL_FITTER";
48 	case POWER_DOMAIN_TRANSCODER_A:
49 		return "TRANSCODER_A";
50 	case POWER_DOMAIN_TRANSCODER_B:
51 		return "TRANSCODER_B";
52 	case POWER_DOMAIN_TRANSCODER_C:
53 		return "TRANSCODER_C";
54 	case POWER_DOMAIN_TRANSCODER_D:
55 		return "TRANSCODER_D";
56 	case POWER_DOMAIN_TRANSCODER_EDP:
57 		return "TRANSCODER_EDP";
58 	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
59 		return "TRANSCODER_VDSC_PW2";
60 	case POWER_DOMAIN_TRANSCODER_DSI_A:
61 		return "TRANSCODER_DSI_A";
62 	case POWER_DOMAIN_TRANSCODER_DSI_C:
63 		return "TRANSCODER_DSI_C";
64 	case POWER_DOMAIN_PORT_DDI_A_LANES:
65 		return "PORT_DDI_A_LANES";
66 	case POWER_DOMAIN_PORT_DDI_B_LANES:
67 		return "PORT_DDI_B_LANES";
68 	case POWER_DOMAIN_PORT_DDI_C_LANES:
69 		return "PORT_DDI_C_LANES";
70 	case POWER_DOMAIN_PORT_DDI_D_LANES:
71 		return "PORT_DDI_D_LANES";
72 	case POWER_DOMAIN_PORT_DDI_E_LANES:
73 		return "PORT_DDI_E_LANES";
74 	case POWER_DOMAIN_PORT_DDI_F_LANES:
75 		return "PORT_DDI_F_LANES";
76 	case POWER_DOMAIN_PORT_DDI_G_LANES:
77 		return "PORT_DDI_G_LANES";
78 	case POWER_DOMAIN_PORT_DDI_H_LANES:
79 		return "PORT_DDI_H_LANES";
80 	case POWER_DOMAIN_PORT_DDI_I_LANES:
81 		return "PORT_DDI_I_LANES";
82 	case POWER_DOMAIN_PORT_DDI_A_IO:
83 		return "PORT_DDI_A_IO";
84 	case POWER_DOMAIN_PORT_DDI_B_IO:
85 		return "PORT_DDI_B_IO";
86 	case POWER_DOMAIN_PORT_DDI_C_IO:
87 		return "PORT_DDI_C_IO";
88 	case POWER_DOMAIN_PORT_DDI_D_IO:
89 		return "PORT_DDI_D_IO";
90 	case POWER_DOMAIN_PORT_DDI_E_IO:
91 		return "PORT_DDI_E_IO";
92 	case POWER_DOMAIN_PORT_DDI_F_IO:
93 		return "PORT_DDI_F_IO";
94 	case POWER_DOMAIN_PORT_DDI_G_IO:
95 		return "PORT_DDI_G_IO";
96 	case POWER_DOMAIN_PORT_DDI_H_IO:
97 		return "PORT_DDI_H_IO";
98 	case POWER_DOMAIN_PORT_DDI_I_IO:
99 		return "PORT_DDI_I_IO";
100 	case POWER_DOMAIN_PORT_DSI:
101 		return "PORT_DSI";
102 	case POWER_DOMAIN_PORT_CRT:
103 		return "PORT_CRT";
104 	case POWER_DOMAIN_PORT_OTHER:
105 		return "PORT_OTHER";
106 	case POWER_DOMAIN_VGA:
107 		return "VGA";
108 	case POWER_DOMAIN_AUDIO:
109 		return "AUDIO";
110 	case POWER_DOMAIN_AUX_A:
111 		return "AUX_A";
112 	case POWER_DOMAIN_AUX_B:
113 		return "AUX_B";
114 	case POWER_DOMAIN_AUX_C:
115 		return "AUX_C";
116 	case POWER_DOMAIN_AUX_D:
117 		return "AUX_D";
118 	case POWER_DOMAIN_AUX_E:
119 		return "AUX_E";
120 	case POWER_DOMAIN_AUX_F:
121 		return "AUX_F";
122 	case POWER_DOMAIN_AUX_G:
123 		return "AUX_G";
124 	case POWER_DOMAIN_AUX_H:
125 		return "AUX_H";
126 	case POWER_DOMAIN_AUX_I:
127 		return "AUX_I";
128 	case POWER_DOMAIN_AUX_IO_A:
129 		return "AUX_IO_A";
130 	case POWER_DOMAIN_AUX_C_TBT:
131 		return "AUX_C_TBT";
132 	case POWER_DOMAIN_AUX_D_TBT:
133 		return "AUX_D_TBT";
134 	case POWER_DOMAIN_AUX_E_TBT:
135 		return "AUX_E_TBT";
136 	case POWER_DOMAIN_AUX_F_TBT:
137 		return "AUX_F_TBT";
138 	case POWER_DOMAIN_AUX_G_TBT:
139 		return "AUX_G_TBT";
140 	case POWER_DOMAIN_AUX_H_TBT:
141 		return "AUX_H_TBT";
142 	case POWER_DOMAIN_AUX_I_TBT:
143 		return "AUX_I_TBT";
144 	case POWER_DOMAIN_GMBUS:
145 		return "GMBUS";
146 	case POWER_DOMAIN_INIT:
147 		return "INIT";
148 	case POWER_DOMAIN_MODESET:
149 		return "MODESET";
150 	case POWER_DOMAIN_GT_IRQ:
151 		return "GT_IRQ";
152 	case POWER_DOMAIN_DPLL_DC_OFF:
153 		return "DPLL_DC_OFF";
154 	default:
155 		MISSING_CASE(domain);
156 		return "?";
157 	}
158 }
159 
160 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
161 				    struct i915_power_well *power_well)
162 {
163 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
164 	power_well->desc->ops->enable(dev_priv, power_well);
165 	power_well->hw_enabled = true;
166 }
167 
168 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
169 				     struct i915_power_well *power_well)
170 {
171 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
172 	power_well->hw_enabled = false;
173 	power_well->desc->ops->disable(dev_priv, power_well);
174 }
175 
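/* Enable the well only on the first (0 -> 1) reference. */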
176 static void intel_power_well_get(struct drm_i915_private *dev_priv,
177 				 struct i915_power_well *power_well)
178 {
179 	if (!power_well->count++)
180 		intel_power_well_enable(dev_priv, power_well);
181 }
182 
183 static void intel_power_well_put(struct drm_i915_private *dev_priv,
184 				 struct i915_power_well *power_well)
185 {
186 	drm_WARN(&dev_priv->drm, !power_well->count,
187 		 "Use count on power well %s is already zero",
188 		 power_well->desc->name);
189 
190 	if (!--power_well->count)
191 		intel_power_well_disable(dev_priv, power_well);
192 }
193 
194 /**
195  * __intel_display_power_is_enabled - unlocked check for a power domain
196  * @dev_priv: i915 device instance
197  * @domain: power domain to check
198  *
199  * This is the unlocked version of intel_display_power_is_enabled() and should
200  * only be used from error capture and recovery code where deadlocks are
201  * possible.
202  *
203  * Returns:
204  * True when the power domain is enabled, false otherwise.
205  */
206 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
207 				      enum intel_display_power_domain domain)
208 {
209 	struct i915_power_well *power_well;
210 	bool is_enabled;
211 
212 	if (dev_priv->runtime_pm.suspended)
213 		return false;
214 
215 	is_enabled = true;
216 
217 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
218 		if (power_well->desc->always_on)
219 			continue;
220 
221 		if (!power_well->hw_enabled) {
222 			is_enabled = false;
223 			break;
224 		}
225 	}
226 
227 	return is_enabled;
228 }
229 
230 /**
231  * intel_display_power_is_enabled - check for a power domain
232  * @dev_priv: i915 device instance
233  * @domain: power domain to check
234  *
235  * This function can be used to check the hw power domain state. It is mostly
236  * used in hardware state readout functions. Everywhere else code should rely
237  * upon explicit power domain reference counting to ensure that the hardware
238  * block is powered up before accessing it.
239  *
240  * Callers must hold the relevant modesetting locks to ensure that concurrent
241  * threads can't disable the power well while the caller tries to read a few
242  * registers.
243  *
244  * Returns:
245  * True when the power domain is enabled, false otherwise.
246  */
247 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
248 				    enum intel_display_power_domain domain)
249 {
250 	struct i915_power_domains *power_domains;
251 	bool ret;
252 
253 	power_domains = &dev_priv->power_domains;
254 
255 	mutex_lock(&power_domains->lock);
256 	ret = __intel_display_power_is_enabled(dev_priv, domain);
257 	mutex_unlock(&power_domains->lock);
258 
259 	return ret;
260 }
261 
262 /*
263  * Starting with Haswell, we have a "Power Down Well" that can be turned off
264  * when not needed anymore. We have 4 registers that can request the power well
265  * to be enabled, and it will only be disabled if none of the registers is
266  * requesting it to be enabled.
267  */
268 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
269 				       u8 irq_pipe_mask, bool has_vga)
270 {
271 	if (has_vga)
272 		intel_vga_reset_io_mem(dev_priv);
273 
274 	if (irq_pipe_mask)
275 		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
276 }
277 
278 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
279 				       u8 irq_pipe_mask)
280 {
281 	if (irq_pipe_mask)
282 		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
283 }
284 
285 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
286 					   struct i915_power_well *power_well)
287 {
288 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
289 	int pw_idx = power_well->desc->hsw.idx;
290 
	/* Timeout for PW1: 10 us, AUX: not specified, other PWs: 20 us. */
292 	if (intel_de_wait_for_set(dev_priv, regs->driver,
293 				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
294 		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
295 			    power_well->desc->name);
296 
297 		/* An AUX timeout is expected if the TBT DP tunnel is down. */
298 		drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
299 	}
300 }
301 
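/*
 * Return a mask of the power well's current requesters: bit 0 for BIOS,
 * bit 1 for the driver, bit 2 for KVMR (where present) and bit 3 for DEBUG.
 */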
302 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
303 				     const struct i915_power_well_regs *regs,
304 				     int pw_idx)
305 {
306 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
307 	u32 ret;
308 
309 	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
310 	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
311 	if (regs->kvmr.reg)
312 		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
313 	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
314 
315 	return ret;
316 }
317 
318 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
319 					    struct i915_power_well *power_well)
320 {
321 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
322 	int pw_idx = power_well->desc->hsw.idx;
323 	bool disabled;
324 	u32 reqs;
325 
326 	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but we still
	 * do so out of paranoia. The known cases where a PW will be forced on:
329 	 * - a KVMR request on any power well via the KVMR request register
330 	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
331 	 *   DEBUG request registers
332 	 * Skip the wait in case any of the request bits are set and print a
333 	 * diagnostic message.
334 	 */
335 	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
336 			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
337 		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
338 	if (disabled)
339 		return;
340 
341 	drm_dbg_kms(&dev_priv->drm,
342 		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
343 		    power_well->desc->name,
344 		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
345 }
346 
347 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
348 					   enum skl_power_gate pg)
349 {
350 	/* Timeout 5us for PG#0, for other PGs 1us */
351 	drm_WARN_ON(&dev_priv->drm,
352 		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
353 					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
354 }
355 
356 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
357 				  struct i915_power_well *power_well)
358 {
359 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
360 	int pw_idx = power_well->desc->hsw.idx;
361 	bool wait_fuses = power_well->desc->hsw.has_fuses;
362 	enum skl_power_gate uninitialized_var(pg);
363 	u32 val;
364 
365 	if (wait_fuses) {
366 		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
367 						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
368 		/*
369 		 * For PW1 we have to wait both for the PW0/PG0 fuse state
370 		 * before enabling the power well and PW1/PG1's own fuse
371 		 * state after the enabling. For all other power wells with
372 		 * fuses we only have to wait for that PW/PG's fuse state
373 		 * after the enabling.
374 		 */
375 		if (pg == SKL_PG1)
376 			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
377 	}
378 
379 	val = intel_de_read(dev_priv, regs->driver);
380 	intel_de_write(dev_priv, regs->driver,
381 		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
382 	hsw_wait_for_power_well_enable(dev_priv, power_well);
383 
384 	/* Display WA #1178: cnl */
385 	if (IS_CANNONLAKE(dev_priv) &&
386 	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
387 	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
388 		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
389 		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
390 		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
391 	}
392 
393 	if (wait_fuses)
394 		gen9_wait_for_power_well_fuses(dev_priv, pg);
395 
396 	hsw_power_well_post_enable(dev_priv,
397 				   power_well->desc->hsw.irq_pipe_mask,
398 				   power_well->desc->hsw.has_vga);
399 }
400 
401 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
402 				   struct i915_power_well *power_well)
403 {
404 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
405 	int pw_idx = power_well->desc->hsw.idx;
406 	u32 val;
407 
408 	hsw_power_well_pre_disable(dev_priv,
409 				   power_well->desc->hsw.irq_pipe_mask);
410 
411 	val = intel_de_read(dev_priv, regs->driver);
412 	intel_de_write(dev_priv, regs->driver,
413 		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
414 	hsw_wait_for_power_well_disable(dev_priv, power_well);
415 }
416 
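/* Map an ICL AUX power well index to the combo PHY it belongs to. */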
417 #define ICL_AUX_PW_TO_PHY(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
418 
419 static void
420 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
421 				    struct i915_power_well *power_well)
422 {
423 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
424 	int pw_idx = power_well->desc->hsw.idx;
425 	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
426 	u32 val;
427 
428 	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
429 
430 	val = intel_de_read(dev_priv, regs->driver);
431 	intel_de_write(dev_priv, regs->driver,
432 		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
433 
434 	if (INTEL_GEN(dev_priv) < 12) {
435 		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
436 		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
437 			       val | ICL_LANE_ENABLE_AUX);
438 	}
439 
440 	hsw_wait_for_power_well_enable(dev_priv, power_well);
441 
442 	/* Display WA #1178: icl */
443 	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
444 	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
445 		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
446 		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
447 		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
448 	}
449 }
450 
451 static void
452 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
453 				     struct i915_power_well *power_well)
454 {
455 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
456 	int pw_idx = power_well->desc->hsw.idx;
457 	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
458 	u32 val;
459 
460 	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
461 
462 	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
463 	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
464 		       val & ~ICL_LANE_ENABLE_AUX);
465 
466 	val = intel_de_read(dev_priv, regs->driver);
467 	intel_de_write(dev_priv, regs->driver,
468 		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
469 
470 	hsw_wait_for_power_well_disable(dev_priv, power_well);
471 }
472 
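/*
 * Map an AUX power well index to its AUX channel: legacy AUX power wells
 * start at AUX_CH_A, TBT AUX power wells at AUX_CH_C.
 */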
473 #define ICL_AUX_PW_TO_CH(pw_idx)	\
474 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
475 
476 #define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
477 	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
478 
479 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
480 				     struct i915_power_well *power_well)
481 {
482 	int pw_idx = power_well->desc->hsw.idx;
483 
484 	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
485 						 ICL_AUX_PW_TO_CH(pw_idx);
486 }
487 
488 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
489 
490 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
491 
492 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
493 				      struct i915_power_well *power_well)
494 {
495 	int refs = hweight64(power_well->desc->domains &
496 			     async_put_domains_mask(&dev_priv->power_domains));
497 
498 	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
499 
500 	return refs;
501 }
502 
503 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
504 					struct i915_power_well *power_well)
505 {
506 	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
507 	struct intel_digital_port *dig_port = NULL;
508 	struct intel_encoder *encoder;
509 
510 	/* Bypass the check if all references are released asynchronously */
511 	if (power_well_async_ref_count(dev_priv, power_well) ==
512 	    power_well->count)
513 		return;
514 
517 	for_each_intel_encoder(&dev_priv->drm, encoder) {
518 		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
519 
520 		if (!intel_phy_is_tc(dev_priv, phy))
521 			continue;
522 
523 		/* We'll check the MST primary port */
524 		if (encoder->type == INTEL_OUTPUT_DP_MST)
525 			continue;
526 
527 		dig_port = enc_to_dig_port(encoder);
528 		if (drm_WARN_ON(&dev_priv->drm, !dig_port))
529 			continue;
530 
531 		if (dig_port->aux_ch != aux_ch) {
532 			dig_port = NULL;
533 			continue;
534 		}
535 
536 		break;
537 	}
538 
539 	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
540 		return;
541 
542 	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
543 }
544 
545 #else
546 
547 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
548 					struct i915_power_well *power_well)
549 {
550 }
551 
552 #endif
553 
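/* Map a TGL TC AUX power well index to its Type-C port. */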
554 #define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
555 
556 static void
557 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
558 				 struct i915_power_well *power_well)
559 {
560 	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
561 	u32 val;
562 
563 	icl_tc_port_assert_ref_held(dev_priv, power_well);
564 
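	/* Route the AUX channel through TBT or non-TBT IO as appropriate. */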
565 	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
566 	val &= ~DP_AUX_CH_CTL_TBT_IO;
567 	if (power_well->desc->hsw.is_tc_tbt)
568 		val |= DP_AUX_CH_CTL_TBT_IO;
569 	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
570 
571 	hsw_power_well_enable(dev_priv, power_well);
572 
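	/*
	 * For non-TBT (legacy/DP-alt) ports, wait for the Dekel PHY's
	 * microcontroller to report that it is healthy.
	 */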
573 	if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
574 		enum tc_port tc_port;
575 
576 		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
577 		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
578 			       HIP_INDEX_VAL(tc_port, 0x2));
579 
580 		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
581 					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting for TC uC health\n");
584 	}
585 }
586 
587 static void
588 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
589 				  struct i915_power_well *power_well)
590 {
591 	icl_tc_port_assert_ref_held(dev_priv, power_well);
592 
593 	hsw_power_well_disable(dev_priv, power_well);
594 }
595 
596 /*
597  * We should only use the power well if we explicitly asked the hardware to
598  * enable it, so check if it's enabled and also check if we've requested it to
599  * be enabled.
600  */
601 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
602 				   struct i915_power_well *power_well)
603 {
604 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
605 	enum i915_power_well_id id = power_well->desc->id;
606 	int pw_idx = power_well->desc->hsw.idx;
607 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
608 		   HSW_PWR_WELL_CTL_STATE(pw_idx);
609 	u32 val;
610 
611 	val = intel_de_read(dev_priv, regs->driver);
612 
613 	/*
614 	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
616 	 * BIOS's own request bits, which are forced-on for these power wells
617 	 * when exiting DC5/6.
618 	 */
619 	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
620 	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
621 		val |= intel_de_read(dev_priv, regs->bios);
622 
623 	return (val & mask) == mask;
624 }
625 
626 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
627 {
628 	drm_WARN_ONCE(&dev_priv->drm,
629 		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
630 		      "DC9 already programmed to be enabled.\n");
631 	drm_WARN_ONCE(&dev_priv->drm,
632 		      intel_de_read(dev_priv, DC_STATE_EN) &
633 		      DC_STATE_EN_UPTO_DC5,
634 		      "DC5 still not disabled to enable DC9.\n");
635 	drm_WARN_ONCE(&dev_priv->drm,
636 		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
637 		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
638 		      "Power well 2 on.\n");
639 	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
640 		      "Interrupts not disabled yet.\n");
641 
642 	 /*
643 	  * TODO: check for the following to verify the conditions to enter DC9
644 	  * state are satisfied:
645 	  * 1] Check relevant display engine registers to verify if mode set
646 	  * disable sequence was followed.
647 	  * 2] Check if display uninitialize sequence is initialized.
648 	  */
649 }
650 
651 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
652 {
653 	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
654 		      "Interrupts not disabled yet.\n");
655 	drm_WARN_ONCE(&dev_priv->drm,
656 		      intel_de_read(dev_priv, DC_STATE_EN) &
657 		      DC_STATE_EN_UPTO_DC5,
658 		      "DC5 still not disabled.\n");
659 
660 	 /*
661 	  * TODO: check for the following to verify DC9 state was indeed
662 	  * entered before programming to disable it:
663 	  * 1] Check relevant display engine registers to verify if mode
664 	  *  set disable sequence was followed.
665 	  * 2] Check if display uninitialize sequence is initialized.
666 	  */
667 }
668 
669 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
670 				u32 state)
671 {
672 	int rewrites = 0;
673 	int rereads = 0;
674 	u32 v;
675 
676 	intel_de_write(dev_priv, DC_STATE_EN, state);
677 
	/*
	 * It has been observed that disabling the DC6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make
	 * sure the write really sticks by re-reading enough times, and
	 * force a rewrite until we are confident the state is exactly
	 * what we want.
	 */
683 	do  {
684 		v = intel_de_read(dev_priv, DC_STATE_EN);
685 
686 		if (v != state) {
687 			intel_de_write(dev_priv, DC_STATE_EN, state);
688 			rewrites++;
689 			rereads = 0;
690 		} else if (rereads++ > 5) {
691 			break;
692 		}
693 
694 	} while (rewrites < 100);
695 
696 	if (v != state)
697 		drm_err(&dev_priv->drm,
698 			"Writing dc state to 0x%x failed, now 0x%x\n",
699 			state, v);
700 
	/* Most of the time a single retry is enough; avoid log spam */
702 	if (rewrites > 1)
703 		drm_dbg_kms(&dev_priv->drm,
704 			    "Rewrote dc state to 0x%x %d times\n",
705 			    state, rewrites);
706 }
707 
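/* Mask of all the DC state enable bits valid for this platform. */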
708 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
709 {
710 	u32 mask;
711 
712 	mask = DC_STATE_EN_UPTO_DC5;
713 
714 	if (INTEL_GEN(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 |
			DC_STATE_EN_DC9;
717 	else if (IS_GEN(dev_priv, 11))
718 		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
719 	else if (IS_GEN9_LP(dev_priv))
720 		mask |= DC_STATE_EN_DC9;
721 	else
722 		mask |= DC_STATE_EN_UPTO_DC6;
723 
724 	return mask;
725 }
726 
727 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
728 {
729 	u32 val;
730 
731 	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
732 
733 	drm_dbg_kms(&dev_priv->drm,
734 		    "Resetting DC state tracking from %02x to %02x\n",
735 		    dev_priv->csr.dc_state, val);
736 	dev_priv->csr.dc_state = val;
737 }
738 
739 /**
740  * gen9_set_dc_state - set target display C power state
741  * @dev_priv: i915 device instance
742  * @state: target DC power state
743  * - DC_STATE_DISABLE
744  * - DC_STATE_EN_UPTO_DC5
745  * - DC_STATE_EN_UPTO_DC6
746  * - DC_STATE_EN_DC9
747  *
748  * Signal to DMC firmware/HW the target DC power state passed in @state.
749  * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * decides on an on-demand basis when to actually enter a given state, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
755  *
 * Based on the above, entering a deeper DC power state is asynchronous wrt.
 * requesting it. Disabling a deeper power state is synchronous: for instance
758  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
759  * back on and register state is restored. This is guaranteed by the MMIO write
760  * to DC_STATE_EN blocking until the state is restored.
761  */
762 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
763 {
764 	u32 val;
765 	u32 mask;
766 
767 	if (drm_WARN_ON_ONCE(&dev_priv->drm,
768 			     state & ~dev_priv->csr.allowed_dc_mask))
769 		state &= dev_priv->csr.allowed_dc_mask;
770 
771 	val = intel_de_read(dev_priv, DC_STATE_EN);
772 	mask = gen9_dc_mask(dev_priv);
773 	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
774 		    val & mask, state);
775 
776 	/* Check if DMC is ignoring our DC state requests */
777 	if ((val & mask) != dev_priv->csr.dc_state)
778 		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
779 			dev_priv->csr.dc_state, val & mask);
780 
781 	val &= ~mask;
782 	val |= state;
783 
784 	gen9_write_dc_state(dev_priv, val);
785 
786 	dev_priv->csr.dc_state = val & mask;
787 }
788 
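/*
 * If the requested DC state isn't in the allowed mask, fall back to the
 * next shallower state in the list; DC_STATE_DISABLE is always allowed.
 */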
789 static u32
790 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
791 			 u32 target_dc_state)
792 {
793 	u32 states[] = {
794 		DC_STATE_EN_UPTO_DC6,
795 		DC_STATE_EN_UPTO_DC5,
796 		DC_STATE_EN_DC3CO,
797 		DC_STATE_DISABLE,
798 	};
799 	int i;
800 
801 	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
802 		if (target_dc_state != states[i])
803 			continue;
804 
805 		if (dev_priv->csr.allowed_dc_mask & target_dc_state)
806 			break;
807 
808 		target_dc_state = states[i + 1];
809 	}
810 
811 	return target_dc_state;
812 }
813 
814 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
815 {
816 	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
817 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
818 }
819 
820 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
821 {
822 	u32 val;
823 
824 	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
825 	val = intel_de_read(dev_priv, DC_STATE_EN);
826 	val &= ~DC_STATE_DC3CO_STATUS;
827 	intel_de_write(dev_priv, DC_STATE_EN, val);
828 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
829 	/*
830 	 * Delay of 200us DC3CO Exit time B.Spec 49196
831 	 */
832 	usleep_range(200, 210);
833 }
834 
835 static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
836 {
837 	assert_can_enable_dc9(dev_priv);
838 
839 	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
840 	/*
841 	 * Power sequencer reset is not needed on
842 	 * platforms with South Display Engine on PCH,
843 	 * because PPS registers are always on.
844 	 */
845 	if (!HAS_PCH_SPLIT(dev_priv))
846 		intel_power_sequencer_reset(dev_priv);
847 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
848 }
849 
850 static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
851 {
852 	assert_can_disable_dc9(dev_priv);
853 
854 	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
855 
856 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
857 
858 	intel_pps_unlock_regs_wa(dev_priv);
859 }
860 
861 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
862 {
863 	drm_WARN_ONCE(&dev_priv->drm,
864 		      !intel_de_read(dev_priv, CSR_PROGRAM(0)),
865 		      "CSR program storage start is NULL\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
		      "CSR SSP base not set up properly\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
		      "CSR HTP not set up properly\n");
870 }
871 
872 static struct i915_power_well *
873 lookup_power_well(struct drm_i915_private *dev_priv,
874 		  enum i915_power_well_id power_well_id)
875 {
876 	struct i915_power_well *power_well;
877 
878 	for_each_power_well(dev_priv, power_well)
879 		if (power_well->desc->id == power_well_id)
880 			return power_well;
881 
882 	/*
883 	 * It's not feasible to add error checking code to the callers since
884 	 * this condition really shouldn't happen and it doesn't even make sense
885 	 * to abort things like display initialization sequences. Just return
886 	 * the first power well and hope the WARN gets reported so we can fix
887 	 * our driver.
888 	 */
889 	drm_WARN(&dev_priv->drm, 1,
890 		 "Power well %d not defined for this platform\n",
891 		 power_well_id);
892 	return &dev_priv->power_domains.power_wells[0];
893 }
894 
895 /**
896  * intel_display_power_set_target_dc_state - Set target dc state.
897  * @dev_priv: i915 device
898  * @state: state which needs to be set as target_dc_state.
899  *
 * This function sets the "DC off" power well's target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
903  */
904 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
905 					     u32 state)
906 {
907 	struct i915_power_well *power_well;
908 	bool dc_off_enabled;
909 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
910 
911 	mutex_lock(&power_domains->lock);
912 	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
913 
914 	if (drm_WARN_ON(&dev_priv->drm, !power_well))
915 		goto unlock;
916 
917 	state = sanitize_target_dc_state(dev_priv, state);
918 
919 	if (state == dev_priv->csr.target_dc_state)
920 		goto unlock;
921 
922 	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
923 							   power_well);
924 	/*
925 	 * If DC off power well is disabled, need to enable and disable the
926 	 * DC off power well to effect target DC state.
927 	 */
928 	if (!dc_off_enabled)
929 		power_well->desc->ops->enable(dev_priv, power_well);
930 
931 	dev_priv->csr.target_dc_state = state;
932 
933 	if (!dc_off_enabled)
934 		power_well->desc->ops->disable(dev_priv, power_well);
935 
936 unlock:
937 	mutex_unlock(&power_domains->lock);
938 }
939 
940 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
941 {
942 	enum i915_power_well_id high_pg;
943 
944 	/* Power wells at this level and above must be disabled for DC5 entry */
945 	if (INTEL_GEN(dev_priv) >= 12)
946 		high_pg = TGL_DISP_PW_3;
947 	else
948 		high_pg = SKL_DISP_PW_2;
949 
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above the platform's DC5 limit still enabled.\n");
953 
954 	drm_WARN_ONCE(&dev_priv->drm,
955 		      (intel_de_read(dev_priv, DC_STATE_EN) &
956 		       DC_STATE_EN_UPTO_DC5),
957 		      "DC5 already programmed to be enabled.\n");
958 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
959 
960 	assert_csr_loaded(dev_priv);
961 }
962 
963 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
964 {
965 	assert_can_enable_dc5(dev_priv);
966 
967 	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
968 
969 	/* Wa Display #1183: skl,kbl,cfl */
970 	if (IS_GEN9_BC(dev_priv))
971 		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
972 			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
973 
974 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
975 }
976 
977 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
978 {
979 	drm_WARN_ONCE(&dev_priv->drm,
980 		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
981 		      "Backlight is not disabled.\n");
982 	drm_WARN_ONCE(&dev_priv->drm,
983 		      (intel_de_read(dev_priv, DC_STATE_EN) &
984 		       DC_STATE_EN_UPTO_DC6),
985 		      "DC6 already programmed to be enabled.\n");
986 
987 	assert_csr_loaded(dev_priv);
988 }
989 
990 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
991 {
992 	assert_can_enable_dc6(dev_priv);
993 
994 	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
995 
996 	/* Wa Display #1183: skl,kbl,cfl */
997 	if (IS_GEN9_BC(dev_priv))
998 		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
999 			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1000 
1001 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1002 }
1003 
1004 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1005 				   struct i915_power_well *power_well)
1006 {
1007 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1008 	int pw_idx = power_well->desc->hsw.idx;
1009 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1010 	u32 bios_req = intel_de_read(dev_priv, regs->bios);
1011 
1012 	/* Take over the request bit if set by BIOS. */
1013 	if (bios_req & mask) {
1014 		u32 drv_req = intel_de_read(dev_priv, regs->driver);
1015 
1016 		if (!(drv_req & mask))
1017 			intel_de_write(dev_priv, regs->driver, drv_req | mask);
1018 		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1019 	}
1020 }
1021 
1022 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1023 					   struct i915_power_well *power_well)
1024 {
1025 	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1026 }
1027 
1028 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1029 					    struct i915_power_well *power_well)
1030 {
1031 	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1032 }
1033 
1034 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1035 					    struct i915_power_well *power_well)
1036 {
1037 	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1038 }
1039 
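/* Verify the DPIO PHY state for each PHY whose power well is still held. */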
1040 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1041 {
1042 	struct i915_power_well *power_well;
1043 
1044 	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1045 	if (power_well->count > 0)
1046 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1047 
1048 	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1049 	if (power_well->count > 0)
1050 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1051 
1052 	if (IS_GEMINILAKE(dev_priv)) {
1053 		power_well = lookup_power_well(dev_priv,
1054 					       GLK_DISP_PW_DPIO_CMN_C);
1055 		if (power_well->count > 0)
1056 			bxt_ddi_phy_verify_state(dev_priv,
1057 						 power_well->desc->bxt.phy);
1058 	}
1059 }
1060 
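/*
 * The "DC off" power well counts as enabled only when no DC state (DC3CO
 * or DC5/DC6) is currently enabled.
 */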
1061 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1062 					   struct i915_power_well *power_well)
1063 {
1064 	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1065 		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1066 }
1067 
1068 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1069 {
1070 	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1071 	u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
1072 
	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
1078 }
1079 
1080 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1081 {
1082 	struct intel_cdclk_config cdclk_config = {};
1083 
1084 	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
1085 		tgl_disable_dc3co(dev_priv);
1086 		return;
1087 	}
1088 
1089 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1090 
1091 	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1092 	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
1093 	drm_WARN_ON(&dev_priv->drm,
1094 		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
1095 					      &cdclk_config));
1096 
1097 	gen9_assert_dbuf_enabled(dev_priv);
1098 
1099 	if (IS_GEN9_LP(dev_priv))
1100 		bxt_verify_ddi_phy_power_wells(dev_priv);
1101 
1102 	if (INTEL_GEN(dev_priv) >= 11)
1103 		/*
1104 		 * DMC retains HW context only for port A, the other combo
1105 		 * PHY's HW context for port B is lost after DC transitions,
1106 		 * so we need to restore it manually.
1107 		 */
1108 		intel_combo_phy_init(dev_priv);
1109 }
1110 
1111 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1112 					  struct i915_power_well *power_well)
1113 {
1114 	gen9_disable_dc_states(dev_priv);
1115 }
1116 
1117 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1118 					   struct i915_power_well *power_well)
1119 {
1120 	if (!dev_priv->csr.dmc_payload)
1121 		return;
1122 
1123 	switch (dev_priv->csr.target_dc_state) {
1124 	case DC_STATE_EN_DC3CO:
1125 		tgl_enable_dc3co(dev_priv);
1126 		break;
1127 	case DC_STATE_EN_UPTO_DC6:
1128 		skl_enable_dc6(dev_priv);
1129 		break;
1130 	case DC_STATE_EN_UPTO_DC5:
1131 		gen9_enable_dc5(dev_priv);
1132 		break;
1133 	}
1134 }
1135 
1136 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1137 					 struct i915_power_well *power_well)
1138 {
1139 }
1140 
1141 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1142 					   struct i915_power_well *power_well)
1143 {
1144 }
1145 
1146 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1147 					     struct i915_power_well *power_well)
1148 {
1149 	return true;
1150 }
1151 
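/*
 * The i830 "pipes" power well forces both pipes on when it's enabled and
 * turns them both off when it's disabled.
 */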
1152 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1153 					 struct i915_power_well *power_well)
1154 {
1155 	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1156 		i830_enable_pipe(dev_priv, PIPE_A);
1157 	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1158 		i830_enable_pipe(dev_priv, PIPE_B);
1159 }
1160 
1161 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1162 					  struct i915_power_well *power_well)
1163 {
1164 	i830_disable_pipe(dev_priv, PIPE_B);
1165 	i830_disable_pipe(dev_priv, PIPE_A);
1166 }
1167 
1168 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1169 					  struct i915_power_well *power_well)
1170 {
1171 	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1172 		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1173 }
1174 
1175 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1176 					  struct i915_power_well *power_well)
1177 {
1178 	if (power_well->count > 0)
1179 		i830_pipes_power_well_enable(dev_priv, power_well);
1180 	else
1181 		i830_pipes_power_well_disable(dev_priv, power_well);
1182 }
1183 
1184 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1185 			       struct i915_power_well *power_well, bool enable)
1186 {
1187 	int pw_idx = power_well->desc->vlv.idx;
1188 	u32 mask;
1189 	u32 state;
1190 	u32 ctrl;
1191 
1192 	mask = PUNIT_PWRGT_MASK(pw_idx);
1193 	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1194 			 PUNIT_PWRGT_PWR_GATE(pw_idx);
1195 
1196 	vlv_punit_get(dev_priv);
1197 
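/* True once the Punit reports the power well in the requested state. */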
1198 #define COND \
1199 	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1200 
1201 	if (COND)
1202 		goto out;
1203 
1204 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1205 	ctrl &= ~mask;
1206 	ctrl |= state;
1207 	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1208 
1209 	if (wait_for(COND, 100))
1210 		drm_err(&dev_priv->drm,
1211 			"timeout setting power well state %08x (%08x)\n",
1212 			state,
1213 			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1214 
1215 #undef COND
1216 
1217 out:
1218 	vlv_punit_put(dev_priv);
1219 }
1220 
1221 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1222 				  struct i915_power_well *power_well)
1223 {
1224 	vlv_set_power_well(dev_priv, power_well, true);
1225 }
1226 
1227 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1228 				   struct i915_power_well *power_well)
1229 {
1230 	vlv_set_power_well(dev_priv, power_well, false);
1231 }
1232 
1233 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1234 				   struct i915_power_well *power_well)
1235 {
1236 	int pw_idx = power_well->desc->vlv.idx;
1237 	bool enabled = false;
1238 	u32 mask;
1239 	u32 state;
1240 	u32 ctrl;
1241 
1242 	mask = PUNIT_PWRGT_MASK(pw_idx);
1243 	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1244 
1245 	vlv_punit_get(dev_priv);
1246 
1247 	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1248 	/*
1249 	 * We only ever set the power-on and power-gate states, anything
1250 	 * else is unexpected.
1251 	 */
1252 	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1253 		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1254 	if (state == ctrl)
1255 		enabled = true;
1256 
1257 	/*
1258 	 * A transient state at this point would mean some unexpected party
1259 	 * is poking at the power controls too.
1260 	 */
1261 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1262 	drm_WARN_ON(&dev_priv->drm, ctrl != state);
1263 
1264 	vlv_punit_put(dev_priv);
1265 
1266 	return enabled;
1267 }
1268 
1269 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1270 {
1271 	u32 val;
1272 
1273 	/*
1274 	 * On driver load, a pipe may be active and driving a DSI display.
1275 	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1276 	 * (and never recovering) in this case. intel_dsi_post_disable() will
1277 	 * clear it when we turn off the display.
1278 	 */
1279 	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
1280 	val &= DPOUNIT_CLOCK_GATE_DISABLE;
1281 	val |= VRHUNIT_CLOCK_GATE_DISABLE;
1282 	intel_de_write(dev_priv, DSPCLK_GATE_D, val);
1283 
1284 	/*
1285 	 * Disable trickle feed and enable pnd deadline calculation
1286 	 */
1287 	intel_de_write(dev_priv, MI_ARB_VLV,
1288 		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1289 	intel_de_write(dev_priv, CBR1_VLV, 0);
1290 
1291 	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1292 	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1293 		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
1294 					 1000));
1295 }
1296 
1297 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1298 {
1299 	struct intel_encoder *encoder;
1300 	enum pipe pipe;
1301 
1302 	/*
1303 	 * Enable the CRI clock source so we can get at the
1304 	 * display and the reference clock for VGA
1305 	 * hotplug / manual detection. Supposedly DSI also
1306 	 * needs the ref clock up and running.
1307 	 *
1308 	 * CHV DPLL B/C have some issues if VGA mode is enabled.
1309 	 */
1310 	for_each_pipe(dev_priv, pipe) {
1311 		u32 val = intel_de_read(dev_priv, DPLL(pipe));
1312 
1313 		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1314 		if (pipe != PIPE_A)
1315 			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1316 
1317 		intel_de_write(dev_priv, DPLL(pipe), val);
1318 	}
1319 
1320 	vlv_init_display_clock_gating(dev_priv);
1321 
1322 	spin_lock_irq(&dev_priv->irq_lock);
1323 	valleyview_enable_display_irqs(dev_priv);
1324 	spin_unlock_irq(&dev_priv->irq_lock);
1325 
1326 	/*
1327 	 * During driver initialization/resume we can avoid restoring the
1328 	 * part of the HW/SW state that will be inited anyway explicitly.
1329 	 */
1330 	if (dev_priv->power_domains.initializing)
1331 		return;
1332 
1333 	intel_hpd_init(dev_priv);
1334 
1335 	/* Re-enable the ADPA, if we have one */
1336 	for_each_intel_encoder(&dev_priv->drm, encoder) {
1337 		if (encoder->type == INTEL_OUTPUT_ANALOG)
1338 			intel_crt_reset(&encoder->base);
1339 	}
1340 
1341 	intel_vga_redisable_power_on(dev_priv);
1342 
1343 	intel_pps_unlock_regs_wa(dev_priv);
1344 }
1345 
1346 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1347 {
1348 	spin_lock_irq(&dev_priv->irq_lock);
1349 	valleyview_disable_display_irqs(dev_priv);
1350 	spin_unlock_irq(&dev_priv->irq_lock);
1351 
1352 	/* make sure we're done processing display irqs */
1353 	intel_synchronize_irq(dev_priv);
1354 
1355 	intel_power_sequencer_reset(dev_priv);
1356 
	/* Prevent us from re-enabling polling by accident in late suspend */
1358 #ifdef __linux__
1359 	if (!dev_priv->drm.dev->power.is_suspended)
1360 #else
1361 	if (!cold)
1362 #endif
1363 		intel_hpd_poll_init(dev_priv);
1364 }
1365 
1366 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1367 					  struct i915_power_well *power_well)
1368 {
1369 	vlv_set_power_well(dev_priv, power_well, true);
1370 
1371 	vlv_display_power_well_init(dev_priv);
1372 }
1373 
1374 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1375 					   struct i915_power_well *power_well)
1376 {
1377 	vlv_display_power_well_deinit(dev_priv);
1378 
1379 	vlv_set_power_well(dev_priv, power_well, false);
1380 }
1381 
1382 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1383 					   struct i915_power_well *power_well)
1384 {
1385 	/* since ref/cri clock was enabled */
1386 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1387 
1388 	vlv_set_power_well(dev_priv, power_well, true);
1389 
1390 	/*
1391 	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1392 	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1393 	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1394 	 *   b.	The other bits such as sfr settings / modesel may all
1395 	 *	be set to 0.
1396 	 *
1397 	 * This should only be done on init and resume from S3 with
1398 	 * both PLLs disabled, or we risk losing DPIO and PLL
1399 	 * synchronization.
1400 	 */
1401 	intel_de_write(dev_priv, DPIO_CTL,
1402 		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
1403 }
1404 
1405 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1406 					    struct i915_power_well *power_well)
1407 {
1408 	enum pipe pipe;
1409 
1410 	for_each_pipe(dev_priv, pipe)
1411 		assert_pll_disabled(dev_priv, pipe);
1412 
1413 	/* Assert common reset */
1414 	intel_de_write(dev_priv, DPIO_CTL,
1415 		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
1416 
1417 	vlv_set_power_well(dev_priv, power_well, false);
1418 }
1419 
1420 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1421 
1422 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1423 
1424 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1425 {
1426 	struct i915_power_well *cmn_bc =
1427 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1428 	struct i915_power_well *cmn_d =
1429 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1430 	u32 phy_control = dev_priv->chv_phy_control;
1431 	u32 phy_status = 0;
1432 	u32 phy_status_mask = 0xffffffff;
1433 
1434 	/*
	 * The BIOS can leave the PHY in some weird state
1436 	 * where it doesn't fully power down some parts.
1437 	 * Disable the asserts until the PHY has been fully
1438 	 * reset (ie. the power well has been disabled at
1439 	 * least once).
1440 	 */
1441 	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1442 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1443 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1444 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1445 				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1446 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1447 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1448 
1449 	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1450 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1451 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1452 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1453 
1454 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1455 		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1456 
1457 		/* this assumes override is only used to enable lanes */
1458 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1459 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1460 
1461 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1462 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1463 
1464 		/* CL1 is on whenever anything is on in either channel */
1465 		if (BITS_SET(phy_control,
1466 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1467 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1468 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1469 
1470 		/*
1471 		 * The DPLLB check accounts for the pipe B + port A usage
1472 		 * with CL2 powered up but all the lanes in the second channel
1473 		 * powered down.
1474 		 */
1475 		if (BITS_SET(phy_control,
1476 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1477 		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1478 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1479 
1480 		if (BITS_SET(phy_control,
1481 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1482 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1483 		if (BITS_SET(phy_control,
1484 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1485 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1486 
1487 		if (BITS_SET(phy_control,
1488 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1489 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1490 		if (BITS_SET(phy_control,
1491 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1492 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1493 	}
1494 
1495 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1496 		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1497 
1498 		/* this assumes override is only used to enable lanes */
1499 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1500 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1501 
1502 		if (BITS_SET(phy_control,
1503 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1504 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1505 
1506 		if (BITS_SET(phy_control,
1507 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1508 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1509 		if (BITS_SET(phy_control,
1510 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1511 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1512 	}
1513 
1514 	phy_status &= phy_status_mask;
1515 
1516 	/*
1517 	 * The PHY may be busy with some initial calibration and whatnot,
1518 	 * so the power state can take a while to actually change.
1519 	 */
1520 	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1521 				       phy_status_mask, phy_status, 10))
1522 		drm_err(&dev_priv->drm,
1523 			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1524 			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1525 			phy_status, dev_priv->chv_phy_control);
1526 }
1527 
1528 #undef BITS_SET
1529 
1530 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1531 					   struct i915_power_well *power_well)
1532 {
1533 	enum dpio_phy phy;
1534 	enum pipe pipe;
1535 	u32 tmp;
1536 
1537 	drm_WARN_ON_ONCE(&dev_priv->drm,
1538 			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1539 			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1540 
1541 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1542 		pipe = PIPE_A;
1543 		phy = DPIO_PHY0;
1544 	} else {
1545 		pipe = PIPE_C;
1546 		phy = DPIO_PHY1;
1547 	}
1548 
1549 	/* since ref/cri clock was enabled */
1550 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1551 	vlv_set_power_well(dev_priv, power_well, true);
1552 
1553 	/* Poll for phypwrgood signal */
1554 	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1555 				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d did not power up\n",
1557 			phy);
1558 
1559 	vlv_dpio_get(dev_priv);
1560 
1561 	/* Enable dynamic power down */
1562 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1563 	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1564 		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1565 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1566 
1567 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1568 		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1569 		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1570 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1571 	} else {
1572 		/*
1573 		 * Force the non-existing CL2 off. BXT does this
1574 		 * too, so maybe it saves some power even though
1575 		 * CL2 doesn't exist?
1576 		 */
1577 		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1578 		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1579 		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1580 	}
1581 
1582 	vlv_dpio_put(dev_priv);
1583 
1584 	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1585 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1586 		       dev_priv->chv_phy_control);
1587 
1588 	drm_dbg_kms(&dev_priv->drm,
1589 		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1590 		    phy, dev_priv->chv_phy_control);
1591 
1592 	assert_chv_phy_status(dev_priv);
1593 }
1594 
1595 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1596 					    struct i915_power_well *power_well)
1597 {
1598 	enum dpio_phy phy;
1599 
1600 	drm_WARN_ON_ONCE(&dev_priv->drm,
1601 			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1602 			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1603 
1604 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1605 		phy = DPIO_PHY0;
1606 		assert_pll_disabled(dev_priv, PIPE_A);
1607 		assert_pll_disabled(dev_priv, PIPE_B);
1608 	} else {
1609 		phy = DPIO_PHY1;
1610 		assert_pll_disabled(dev_priv, PIPE_C);
1611 	}
1612 
1613 	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1614 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1615 		       dev_priv->chv_phy_control);
1616 
1617 	vlv_set_power_well(dev_priv, power_well, false);
1618 
1619 	drm_dbg_kms(&dev_priv->drm,
1620 		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1621 		    phy, dev_priv->chv_phy_control);
1622 
1623 	/* PHY is fully reset now, so we can enable the PHY state asserts */
1624 	dev_priv->chv_phy_assert[phy] = true;
1625 
1626 	assert_chv_phy_status(dev_priv);
1627 }
1628 
1629 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1630 				     enum dpio_channel ch, bool override, unsigned int mask)
1631 {
1632 	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1633 	u32 reg, val, expected, actual;
1634 
1635 	/*
	 * The BIOS can leave the PHY in some weird state
1637 	 * where it doesn't fully power down some parts.
1638 	 * Disable the asserts until the PHY has been fully
1639 	 * reset (ie. the power well has been disabled at
1640 	 * least once).
1641 	 */
1642 	if (!dev_priv->chv_phy_assert[phy])
1643 		return;
1644 
1645 	if (ch == DPIO_CH0)
1646 		reg = _CHV_CMN_DW0_CH0;
1647 	else
1648 		reg = _CHV_CMN_DW6_CH1;
1649 
1650 	vlv_dpio_get(dev_priv);
1651 	val = vlv_dpio_read(dev_priv, pipe, reg);
1652 	vlv_dpio_put(dev_priv);
1653 
1654 	/*
1655 	 * This assumes !override is only used when the port is disabled.
1656 	 * All lanes should power down even without the override when
1657 	 * the port is disabled.
1658 	 */
1659 	if (!override || mask == 0xf) {
1660 		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1661 		/*
1662 		 * If CH1 common lane is not active anymore
1663 		 * (eg. for pipe B DPLL) the entire channel will
1664 		 * shut down, which causes the common lane registers
1665 		 * to read as 0. That means we can't actually check
1666 		 * the lane power down status bits, but as the entire
1667 		 * register reads as 0 it's a good indication that the
1668 		 * channel is indeed entirely powered down.
1669 		 */
1670 		if (ch == DPIO_CH1 && val == 0)
1671 			expected = 0;
1672 	} else if (mask != 0x0) {
1673 		expected = DPIO_ANYDL_POWERDOWN;
1674 	} else {
1675 		expected = 0;
1676 	}
1677 
1678 	if (ch == DPIO_CH0)
1679 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1680 	else
1681 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1682 	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1683 
1684 	drm_WARN(&dev_priv->drm, actual != expected,
1685 		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1686 		 !!(actual & DPIO_ALLDL_POWERDOWN),
1687 		 !!(actual & DPIO_ANYDL_POWERDOWN),
1688 		 !!(expected & DPIO_ALLDL_POWERDOWN),
1689 		 !!(expected & DPIO_ANYDL_POWERDOWN),
1690 		 reg, val);
1691 }
1692 
1693 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1694 			  enum dpio_channel ch, bool override)
1695 {
1696 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1697 	bool was_override;
1698 
1699 	mutex_lock(&power_domains->lock);
1700 
1701 	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1702 
1703 	if (override == was_override)
1704 		goto out;
1705 
1706 	if (override)
1707 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1708 	else
1709 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1710 
1711 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1712 		       dev_priv->chv_phy_control);
1713 
1714 	drm_dbg_kms(&dev_priv->drm,
1715 		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1716 		    phy, ch, dev_priv->chv_phy_control);
1717 
1718 	assert_chv_phy_status(dev_priv);
1719 
1720 out:
1721 	mutex_unlock(&power_domains->lock);
1722 
1723 	return was_override;
1724 }
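
/*
 * A usage sketch for the override save/restore pattern enabled by the
 * return value above (illustrative only; the calling context is
 * hypothetical):
 *
 *	bool was_override;
 *
 *	was_override = chv_phy_powergate_ch(dev_priv, phy, ch, true);
 *
 *	... reprogram the PHY with the override forced on ...
 *
 *	chv_phy_powergate_ch(dev_priv, phy, ch, was_override);
 */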
1725 
1726 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1727 			     bool override, unsigned int mask)
1728 {
1729 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1730 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1731 	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
1732 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
1733 
1734 	mutex_lock(&power_domains->lock);
1735 
1736 	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1737 	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1738 
1739 	if (override)
1740 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1741 	else
1742 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1743 
1744 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1745 		       dev_priv->chv_phy_control);
1746 
1747 	drm_dbg_kms(&dev_priv->drm,
1748 		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1749 		    phy, ch, mask, dev_priv->chv_phy_control);
1750 
1751 	assert_chv_phy_status(dev_priv);
1752 
1753 	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1754 
1755 	mutex_unlock(&power_domains->lock);
1756 }
1757 
1758 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1759 					struct i915_power_well *power_well)
1760 {
1761 	enum pipe pipe = PIPE_A;
1762 	bool enabled;
1763 	u32 state, ctrl;
1764 
1765 	vlv_punit_get(dev_priv);
1766 
1767 	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1768 	/*
1769 	 * We only ever set the power-on and power-gate states, anything
1770 	 * else is unexpected.
1771 	 */
1772 	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1773 		    state != DP_SSS_PWR_GATE(pipe));
1774 	enabled = state == DP_SSS_PWR_ON(pipe);
1775 
1776 	/*
1777 	 * A transient state at this point would mean some unexpected party
1778 	 * is poking at the power controls too.
1779 	 */
1780 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1781 	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1782 
1783 	vlv_punit_put(dev_priv);
1784 
1785 	return enabled;
1786 }
1787 
1788 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1789 				    struct i915_power_well *power_well,
1790 				    bool enable)
1791 {
1792 	enum pipe pipe = PIPE_A;
1793 	u32 state;
1794 	u32 ctrl;
1795 
1796 	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1797 
1798 	vlv_punit_get(dev_priv);
1799 
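	/*
	 * Descriptive note: COND below is re-evaluated by wait_for() on
	 * every polling iteration, so we wait for the punit-reported pipe
	 * power well state to reach the requested state.
	 */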
1800 #define COND \
1801 	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1802 
1803 	if (COND)
1804 		goto out;
1805 
1806 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1807 	ctrl &= ~DP_SSC_MASK(pipe);
1808 	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1809 	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1810 
1811 	if (wait_for(COND, 100))
1812 		drm_err(&dev_priv->drm,
1813 			"timeout setting power well state %08x (%08x)\n",
1814 			state,
1815 			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1816 
1817 #undef COND
1818 
1819 out:
1820 	vlv_punit_put(dev_priv);
1821 }
1822 
1823 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1824 					struct i915_power_well *power_well)
1825 {
1826 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1827 		       dev_priv->chv_phy_control);
1828 }
1829 
1830 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1831 				       struct i915_power_well *power_well)
1832 {
1833 	chv_set_pipe_power_well(dev_priv, power_well, true);
1834 
1835 	vlv_display_power_well_init(dev_priv);
1836 }
1837 
1838 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1839 					struct i915_power_well *power_well)
1840 {
1841 	vlv_display_power_well_deinit(dev_priv);
1842 
1843 	chv_set_pipe_power_well(dev_priv, power_well, false);
1844 }
1845 
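/*
 * Async put bookkeeping (a descriptive note derived from the code below):
 * async_put_domains[0] holds the domains owned by the currently queued or
 * running async work, while async_put_domains[1] collects domains that were
 * put asynchronously while that work was already pending. The work handler
 * releases bucket [0] and requeues itself for bucket [1]; the two masks must
 * always stay disjoint.
 */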
1846 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1847 {
1848 	return power_domains->async_put_domains[0] |
1849 	       power_domains->async_put_domains[1];
1850 }
1851 
1852 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1853 
1854 static bool
1855 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1856 {
1857 	return !WARN_ON(power_domains->async_put_domains[0] &
1858 			power_domains->async_put_domains[1]);
1859 }
1860 
1861 static bool
1862 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1863 {
1864 	enum intel_display_power_domain domain;
1865 	bool err = false;
1866 
1867 	err |= !assert_async_put_domain_masks_disjoint(power_domains);
1868 	err |= WARN_ON(!!power_domains->async_put_wakeref !=
1869 		       !!__async_put_domains_mask(power_domains));
1870 
1871 	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1872 		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
1873 
1874 	return !err;
1875 }
1876 
1877 static void print_power_domains(struct i915_power_domains *power_domains,
1878 				const char *prefix, u64 mask)
1879 {
1880 	enum intel_display_power_domain domain;
1881 
1882 	DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
1883 	for_each_power_domain(domain, mask)
1884 		DRM_DEBUG_DRIVER("%s use_count %d\n",
1885 				 intel_display_power_domain_str(domain),
1886 				 power_domains->domain_use_count[domain]);
1887 }
1888 
1889 static void
1890 print_async_put_domains_state(struct i915_power_domains *power_domains)
1891 {
1892 	DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
1893 			 power_domains->async_put_wakeref);
1894 
1895 	print_power_domains(power_domains, "async_put_domains[0]",
1896 			    power_domains->async_put_domains[0]);
1897 	print_power_domains(power_domains, "async_put_domains[1]",
1898 			    power_domains->async_put_domains[1]);
1899 }
1900 
1901 static void
1902 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1903 {
1904 	if (!__async_put_domains_state_ok(power_domains))
1905 		print_async_put_domains_state(power_domains);
1906 }
1907 
1908 #else
1909 
1910 static void
1911 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1912 {
1913 }
1914 
1915 static void
1916 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1917 {
1918 }
1919 
1920 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
1921 
1922 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
1923 {
1924 	assert_async_put_domain_masks_disjoint(power_domains);
1925 
1926 	return __async_put_domains_mask(power_domains);
1927 }
1928 
1929 static void
1930 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
1931 			       enum intel_display_power_domain domain)
1932 {
1933 	assert_async_put_domain_masks_disjoint(power_domains);
1934 
1935 	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
1936 	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
1937 }
1938 
1939 static bool
1940 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
1941 				       enum intel_display_power_domain domain)
1942 {
1943 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1944 	bool ret = false;
1945 
1946 	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
1947 		goto out_verify;
1948 
1949 	async_put_domains_clear_domain(power_domains, domain);
1950 
1951 	ret = true;
1952 
1953 	if (async_put_domains_mask(power_domains))
1954 		goto out_verify;
1955 
1956 	cancel_delayed_work(&power_domains->async_put_work);
1957 	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
1958 				 fetch_and_zero(&power_domains->async_put_wakeref));
1959 out_verify:
1960 	verify_async_put_domains_state(power_domains);
1961 
1962 	return ret;
1963 }
1964 
1965 static void
1966 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1967 				 enum intel_display_power_domain domain)
1968 {
1969 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1970 	struct i915_power_well *power_well;
1971 
1972 	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
1973 		return;
1974 
1975 	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1976 		intel_power_well_get(dev_priv, power_well);
1977 
1978 	power_domains->domain_use_count[domain]++;
1979 }
1980 
1981 /**
1982  * intel_display_power_get - grab a power domain reference
1983  * @dev_priv: i915 device instance
1984  * @domain: power domain to reference
1985  *
1986  * This function grabs a power domain reference for @domain and ensures that the
1987  * power domain and all its parents are powered up. Therefore users should only
1988  * grab a reference to the innermost power domain they need.
1989  *
1990  * Any power domain reference obtained by this function must have a symmetric
1991  * call to intel_display_power_put() to release the reference again.
1992  */
1993 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1994 					enum intel_display_power_domain domain)
1995 {
1996 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1997 	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1998 
1999 	mutex_lock(&power_domains->lock);
2000 	__intel_display_power_get_domain(dev_priv, domain);
2001 	mutex_unlock(&power_domains->lock);
2002 
2003 	return wakeref;
2004 }
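
/*
 * A minimal usage sketch (illustrative only; the caller, the chosen domain
 * and the register access are hypothetical):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *
 *	... access the PIPE_A hardware ...
 *
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */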
2005 
/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the domain
 * and all its parents are already enabled, so it never powers anything up as
 * a side effect. Users should only grab a reference to the innermost power
 * domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Return: a wakeref if the reference was grabbed, 0 if @domain (or the device
 * as a whole) was not enabled.
 */
2018 intel_wakeref_t
2019 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2020 				   enum intel_display_power_domain domain)
2021 {
2022 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2023 	intel_wakeref_t wakeref;
2024 	bool is_enabled;
2025 
2026 	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2027 	if (!wakeref)
		return 0;
2029 
2030 	mutex_lock(&power_domains->lock);
2031 
2032 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
2033 		__intel_display_power_get_domain(dev_priv, domain);
2034 		is_enabled = true;
2035 	} else {
2036 		is_enabled = false;
2037 	}
2038 
2039 	mutex_unlock(&power_domains->lock);
2040 
2041 	if (!is_enabled) {
2042 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2043 		wakeref = 0;
2044 	}
2045 
2046 	return wakeref;
2047 }
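
/*
 * A sketch of the conditional-access pattern this function enables
 * (illustrative only): skip the hardware access entirely when the domain
 * is powered down instead of powering it up as a side effect.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
 *	if (!wakeref)
 *		return;
 *
 *	... read out the hardware state ...
 *
 *	intel_display_power_put(dev_priv, domain, wakeref);
 */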
2048 
2049 static void
2050 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2051 				 enum intel_display_power_domain domain)
2052 {
2053 	struct i915_power_domains *power_domains;
2054 	struct i915_power_well *power_well;
2055 	const char *name = intel_display_power_domain_str(domain);
2056 
2057 	power_domains = &dev_priv->power_domains;
2058 
2059 	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2060 		 "Use count on domain %s is already zero\n",
2061 		 name);
2062 	drm_WARN(&dev_priv->drm,
2063 		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2064 		 "Async disabling of domain %s is pending\n",
2065 		 name);
2066 
2067 	power_domains->domain_use_count[domain]--;
2068 
2069 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2070 		intel_power_well_put(dev_priv, power_well);
2071 }
2072 
2073 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2074 				      enum intel_display_power_domain domain)
2075 {
2076 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2077 
2078 	mutex_lock(&power_domains->lock);
2079 	__intel_display_power_put_domain(dev_priv, domain);
2080 	mutex_unlock(&power_domains->lock);
2081 }
2082 
2083 /**
2084  * intel_display_power_put_unchecked - release an unchecked power domain reference
2085  * @dev_priv: i915 device instance
2086  * @domain: power domain to reference
2087  *
2088  * This function drops the power domain reference obtained by
2089  * intel_display_power_get() and might power down the corresponding hardware
2090  * block right away if this is the last reference.
2091  *
2092  * This function exists only for historical reasons and should be avoided in
2093  * new code, as the correctness of its use cannot be checked. Always use
2094  * intel_display_power_put() instead.
2095  */
2096 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2097 				       enum intel_display_power_domain domain)
2098 {
2099 	__intel_display_power_put(dev_priv, domain);
2100 	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2101 }
2102 
2103 static void
2104 queue_async_put_domains_work(struct i915_power_domains *power_domains,
2105 			     intel_wakeref_t wakeref)
2106 {
2107 	WARN_ON(power_domains->async_put_wakeref);
2108 	power_domains->async_put_wakeref = wakeref;
2109 	WARN_ON(!queue_delayed_work(system_unbound_wq,
2110 				    &power_domains->async_put_work,
2111 				    msecs_to_jiffies(100)));
2112 }
2113 
2114 static void
2115 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2116 {
2117 	struct drm_i915_private *dev_priv =
2118 		container_of(power_domains, struct drm_i915_private,
2119 			     power_domains);
2120 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2121 	enum intel_display_power_domain domain;
2122 	intel_wakeref_t wakeref;
2123 
	/*
	 * The caller must already hold a raw wakeref; upgrade that to a
	 * proper wakeref to make the state checker happy about the HW
	 * access during power well disabling.
	 */
2129 	assert_rpm_raw_wakeref_held(rpm);
2130 	wakeref = intel_runtime_pm_get(rpm);
2131 
2132 	for_each_power_domain(domain, mask) {
2133 		/* Clear before put, so put's sanity check is happy. */
2134 		async_put_domains_clear_domain(power_domains, domain);
2135 		__intel_display_power_put_domain(dev_priv, domain);
2136 	}
2137 
2138 	intel_runtime_pm_put(rpm, wakeref);
2139 }
2140 
2141 static void
2142 intel_display_power_put_async_work(struct work_struct *work)
2143 {
2144 	struct drm_i915_private *dev_priv =
2145 		container_of(work, struct drm_i915_private,
2146 			     power_domains.async_put_work.work);
2147 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2148 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2149 	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2150 	intel_wakeref_t old_work_wakeref = 0;
2151 
2152 	mutex_lock(&power_domains->lock);
2153 
	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * back by subsequent gets or by a flush_work.
	 */
2158 	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2159 	if (!old_work_wakeref)
2160 		goto out_verify;
2161 
2162 	release_async_put_domains(power_domains,
2163 				  power_domains->async_put_domains[0]);
2164 
2165 	/* Requeue the work if more domains were async put meanwhile. */
2166 	if (power_domains->async_put_domains[1]) {
2167 		power_domains->async_put_domains[0] =
2168 			fetch_and_zero(&power_domains->async_put_domains[1]);
2169 		queue_async_put_domains_work(power_domains,
2170 					     fetch_and_zero(&new_work_wakeref));
2171 	}
2172 
2173 out_verify:
2174 	verify_async_put_domains_state(power_domains);
2175 
2176 	mutex_unlock(&power_domains->lock);
2177 
2178 	if (old_work_wakeref)
2179 		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2180 	if (new_work_wakeref)
2181 		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2182 }
2183 
2184 /**
2185  * intel_display_power_put_async - release a power domain reference asynchronously
2186  * @i915: i915 device instance
2187  * @domain: power domain to reference
2188  * @wakeref: wakeref acquired for the reference that is being released
2189  *
2190  * This function drops the power domain reference obtained by
2191  * intel_display_power_get*() and schedules a work to power down the
2192  * corresponding hardware block if this is the last reference.
2193  */
2194 void __intel_display_power_put_async(struct drm_i915_private *i915,
2195 				     enum intel_display_power_domain domain,
2196 				     intel_wakeref_t wakeref)
2197 {
2198 	struct i915_power_domains *power_domains = &i915->power_domains;
2199 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
2200 	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2201 
2202 	mutex_lock(&power_domains->lock);
2203 
2204 	if (power_domains->domain_use_count[domain] > 1) {
2205 		__intel_display_power_put_domain(i915, domain);
2206 
2207 		goto out_verify;
2208 	}
2209 
2210 	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2211 
2212 	/* Let a pending work requeue itself or queue a new one. */
2213 	if (power_domains->async_put_wakeref) {
2214 		power_domains->async_put_domains[1] |= BIT_ULL(domain);
2215 	} else {
2216 		power_domains->async_put_domains[0] |= BIT_ULL(domain);
2217 		queue_async_put_domains_work(power_domains,
2218 					     fetch_and_zero(&work_wakeref));
2219 	}
2220 
2221 out_verify:
2222 	verify_async_put_domains_state(power_domains);
2223 
2224 	mutex_unlock(&power_domains->lock);
2225 
2226 	if (work_wakeref)
2227 		intel_runtime_pm_put_raw(rpm, work_wakeref);
2228 
2229 	intel_runtime_pm_put(rpm, wakeref);
2230 }
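
/*
 * A usage sketch (illustrative only): dropping a reference on a hot path
 * without stalling on the power-off. The domain stays enabled for the
 * ~100ms work delay, so a quick re-get avoids a full power well cycle.
 * intel_display_power_put_async() is the wrapper named in the kernel-doc
 * above.
 *
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 */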
2231 
2232 /**
2233  * intel_display_power_flush_work - flushes the async display power disabling work
2234  * @i915: i915 device instance
2235  *
2236  * Flushes any pending work that was scheduled by a preceding
2237  * intel_display_power_put_async() call, completing the disabling of the
2238  * corresponding power domains.
2239  *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler is no longer running,
 * use intel_display_power_flush_work_sync() instead.
2243  */
2244 void intel_display_power_flush_work(struct drm_i915_private *i915)
2245 {
2246 	struct i915_power_domains *power_domains = &i915->power_domains;
2247 	intel_wakeref_t work_wakeref;
2248 
2249 	mutex_lock(&power_domains->lock);
2250 
2251 	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2252 	if (!work_wakeref)
2253 		goto out_verify;
2254 
2255 	release_async_put_domains(power_domains,
2256 				  async_put_domains_mask(power_domains));
2257 	cancel_delayed_work(&power_domains->async_put_work);
2258 
2259 out_verify:
2260 	verify_async_put_domains_state(power_domains);
2261 
2262 	mutex_unlock(&power_domains->lock);
2263 
2264 	if (work_wakeref)
2265 		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2266 }
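
/*
 * A teardown sketch (illustrative only): make sure a previously scheduled
 * async power-off has actually completed before continuing:
 *
 *	intel_display_power_put_async(i915, domain, wakeref);
 *	...
 *	intel_display_power_flush_work(i915);
 */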
2267 
2268 /**
2269  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2270  * @i915: i915 device instance
2271  *
 * Like intel_display_power_flush_work(), but also ensures that the work
 * handler function is no longer running when this function returns.
2274  */
2275 static void
2276 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2277 {
2278 	struct i915_power_domains *power_domains = &i915->power_domains;
2279 
2280 	intel_display_power_flush_work(i915);
2281 	cancel_delayed_work_sync(&power_domains->async_put_work);
2282 
2283 	verify_async_put_domains_state(power_domains);
2284 
2285 	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2286 }
2287 
2288 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2289 /**
2290  * intel_display_power_put - release a power domain reference
2291  * @dev_priv: i915 device instance
2292  * @domain: power domain to reference
2293  * @wakeref: wakeref acquired for the reference that is being released
2294  *
2295  * This function drops the power domain reference obtained by
2296  * intel_display_power_get() and might power down the corresponding hardware
2297  * block right away if this is the last reference.
2298  */
2299 void intel_display_power_put(struct drm_i915_private *dev_priv,
2300 			     enum intel_display_power_domain domain,
2301 			     intel_wakeref_t wakeref)
2302 {
2303 	__intel_display_power_put(dev_priv, domain);
2304 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2305 }
2306 #endif
2307 
2308 #define I830_PIPES_POWER_DOMAINS (		\
2309 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2310 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2311 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2312 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2313 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2314 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2315 	BIT_ULL(POWER_DOMAIN_INIT))
2316 
2317 #define VLV_DISPLAY_POWER_DOMAINS (		\
2318 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2319 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2320 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2321 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2322 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2323 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2324 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2325 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2326 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2327 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2328 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2329 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2330 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2331 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2332 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2333 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2334 	BIT_ULL(POWER_DOMAIN_INIT))
2335 
2336 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
2337 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2338 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2339 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2340 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2341 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2342 	BIT_ULL(POWER_DOMAIN_INIT))
2343 
2344 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
2345 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2346 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2347 	BIT_ULL(POWER_DOMAIN_INIT))
2348 
2349 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
2350 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2351 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2352 	BIT_ULL(POWER_DOMAIN_INIT))
2353 
2354 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2355 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2356 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2357 	BIT_ULL(POWER_DOMAIN_INIT))
2358 
2359 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2360 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2361 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2362 	BIT_ULL(POWER_DOMAIN_INIT))
2363 
2364 #define CHV_DISPLAY_POWER_DOMAINS (		\
2365 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2366 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2367 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2368 	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2369 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2370 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2371 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2372 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2373 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2374 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2375 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2376 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2377 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2378 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2379 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2380 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2381 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2382 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2383 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2384 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2385 	BIT_ULL(POWER_DOMAIN_INIT))
2386 
2387 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2388 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2389 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2390 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2391 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2392 	BIT_ULL(POWER_DOMAIN_INIT))
2393 
2394 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2395 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2396 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2397 	BIT_ULL(POWER_DOMAIN_INIT))
2398 
2399 #define HSW_DISPLAY_POWER_DOMAINS (			\
2400 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2401 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2402 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2403 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2404 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2405 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2406 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2407 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2408 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2409 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2410 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2411 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2412 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2413 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2414 	BIT_ULL(POWER_DOMAIN_INIT))
2415 
2416 #define BDW_DISPLAY_POWER_DOMAINS (			\
2417 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2418 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2419 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2420 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2421 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2422 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2423 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2424 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2425 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2426 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2427 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2428 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2429 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2430 	BIT_ULL(POWER_DOMAIN_INIT))
2431 
2432 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2433 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2434 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2435 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2436 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2437 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2438 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2439 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2440 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2441 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2442 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2443 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2444 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2445 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2446 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2447 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2448 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2449 	BIT_ULL(POWER_DOMAIN_INIT))
2450 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2451 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2452 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2453 	BIT_ULL(POWER_DOMAIN_INIT))
2454 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2455 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2456 	BIT_ULL(POWER_DOMAIN_INIT))
2457 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2458 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2459 	BIT_ULL(POWER_DOMAIN_INIT))
2460 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2461 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2462 	BIT_ULL(POWER_DOMAIN_INIT))
2463 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2464 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2465 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2466 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2467 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2468 	BIT_ULL(POWER_DOMAIN_INIT))
2469 
2470 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2471 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2472 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2473 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2474 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2475 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2476 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2477 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2478 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2479 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2480 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2481 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2482 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2483 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2484 	BIT_ULL(POWER_DOMAIN_INIT))
2485 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2486 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2487 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2488 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2489 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2490 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2491 	BIT_ULL(POWER_DOMAIN_INIT))
2492 #define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2493 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2494 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2495 	BIT_ULL(POWER_DOMAIN_INIT))
2496 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2497 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2498 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2499 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2500 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2501 	BIT_ULL(POWER_DOMAIN_INIT))
2502 
2503 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2504 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2505 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2506 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2507 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2508 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2509 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2510 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2511 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2512 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2513 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2514 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2515 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2516 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2517 	BIT_ULL(POWER_DOMAIN_INIT))
2518 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2519 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2520 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2521 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2522 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2523 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2524 #define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2525 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2526 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2527 	BIT_ULL(POWER_DOMAIN_INIT))
2528 #define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2529 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2530 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2531 	BIT_ULL(POWER_DOMAIN_INIT))
2532 #define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2533 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2534 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2535 	BIT_ULL(POWER_DOMAIN_INIT))
2536 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2537 	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2538 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2539 	BIT_ULL(POWER_DOMAIN_INIT))
2540 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2541 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2542 	BIT_ULL(POWER_DOMAIN_INIT))
2543 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2544 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2545 	BIT_ULL(POWER_DOMAIN_INIT))
2546 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2547 	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2548 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2549 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2550 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2551 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2552 	BIT_ULL(POWER_DOMAIN_INIT))
2553 
2554 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2555 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2556 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2557 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2558 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2559 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2560 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2561 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2562 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2563 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2564 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2565 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
2566 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2567 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2568 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2569 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2570 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2571 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2572 	BIT_ULL(POWER_DOMAIN_INIT))
2573 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
2574 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2575 	BIT_ULL(POWER_DOMAIN_INIT))
2576 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
2577 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2578 	BIT_ULL(POWER_DOMAIN_INIT))
2579 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
2580 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2581 	BIT_ULL(POWER_DOMAIN_INIT))
2582 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
2583 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2584 	BIT_ULL(POWER_DOMAIN_INIT))
2585 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
2586 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2587 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2588 	BIT_ULL(POWER_DOMAIN_INIT))
2589 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
2590 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2591 	BIT_ULL(POWER_DOMAIN_INIT))
2592 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
2593 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2594 	BIT_ULL(POWER_DOMAIN_INIT))
2595 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
2596 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2597 	BIT_ULL(POWER_DOMAIN_INIT))
2598 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
2599 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2600 	BIT_ULL(POWER_DOMAIN_INIT))
2601 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
2602 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2603 	BIT_ULL(POWER_DOMAIN_INIT))
2604 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2605 	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2606 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2607 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2608 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2609 	BIT_ULL(POWER_DOMAIN_INIT))
2610 
2611 /*
2612  * ICL PW_0/PG_0 domains (HW/DMC control):
2613  * - PCI
2614  * - clocks except port PLL
2615  * - central power except FBC
2616  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2617  * ICL PW_1/PG_1 domains (HW/DMC control):
2618  * - DBUF function
2619  * - PIPE_A and its planes, except VGA
2620  * - transcoder EDP + PSR
2621  * - transcoder DSI
2622  * - DDI_A
2623  * - FBC
2624  */
2625 #define ICL_PW_4_POWER_DOMAINS (			\
2626 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2627 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2628 	BIT_ULL(POWER_DOMAIN_INIT))
2629 	/* VDSC/joining */
2630 #define ICL_PW_3_POWER_DOMAINS (			\
2631 	ICL_PW_4_POWER_DOMAINS |			\
2632 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2633 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2634 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2635 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2636 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2637 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2638 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2639 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2640 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2641 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2642 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2643 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2644 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2645 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2646 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2647 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |		\
2648 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2649 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2650 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2651 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2652 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2653 	BIT_ULL(POWER_DOMAIN_INIT))
2654 	/*
2655 	 * - transcoder WD
2656 	 * - KVMR (HW control)
2657 	 */
2658 #define ICL_PW_2_POWER_DOMAINS (			\
2659 	ICL_PW_3_POWER_DOMAINS |			\
2660 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |		\
2661 	BIT_ULL(POWER_DOMAIN_INIT))
2662 	/*
2663 	 * - KVMR (HW control)
2664 	 */
2665 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2666 	ICL_PW_2_POWER_DOMAINS |			\
2667 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2668 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2669 	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
2670 	BIT_ULL(POWER_DOMAIN_INIT))
2671 
2672 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
2673 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2674 #define ICL_DDI_IO_B_POWER_DOMAINS (			\
2675 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2676 #define ICL_DDI_IO_C_POWER_DOMAINS (			\
2677 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2678 #define ICL_DDI_IO_D_POWER_DOMAINS (			\
2679 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2680 #define ICL_DDI_IO_E_POWER_DOMAINS (			\
2681 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2682 #define ICL_DDI_IO_F_POWER_DOMAINS (			\
2683 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2684 
2685 #define ICL_AUX_A_IO_POWER_DOMAINS (			\
2686 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2687 	BIT_ULL(POWER_DOMAIN_AUX_A))
2688 #define ICL_AUX_B_IO_POWER_DOMAINS (			\
2689 	BIT_ULL(POWER_DOMAIN_AUX_B))
2690 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS (		\
2691 	BIT_ULL(POWER_DOMAIN_AUX_C))
2692 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS (		\
2693 	BIT_ULL(POWER_DOMAIN_AUX_D))
2694 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS (		\
2695 	BIT_ULL(POWER_DOMAIN_AUX_E))
2696 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS (		\
2697 	BIT_ULL(POWER_DOMAIN_AUX_F))
2698 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (		\
2699 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2700 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (		\
2701 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2702 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (		\
2703 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2704 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (		\
2705 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2706 
2707 #define TGL_PW_5_POWER_DOMAINS (			\
2708 	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
2709 	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
2710 	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2711 	BIT_ULL(POWER_DOMAIN_INIT))
2712 
2713 #define TGL_PW_4_POWER_DOMAINS (			\
2714 	TGL_PW_5_POWER_DOMAINS |			\
2715 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2716 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2717 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2718 	BIT_ULL(POWER_DOMAIN_INIT))
2719 
2720 #define TGL_PW_3_POWER_DOMAINS (			\
2721 	TGL_PW_4_POWER_DOMAINS |			\
2722 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2723 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2724 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2725 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2726 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2727 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2728 	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |	\
2729 	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |	\
2730 	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |	\
2731 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2732 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2733 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2734 	BIT_ULL(POWER_DOMAIN_AUX_G) |			\
2735 	BIT_ULL(POWER_DOMAIN_AUX_H) |			\
2736 	BIT_ULL(POWER_DOMAIN_AUX_I) |			\
2737 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2738 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2739 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2740 	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |		\
2741 	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |		\
2742 	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |		\
2743 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2744 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2745 	BIT_ULL(POWER_DOMAIN_INIT))
2746 
2747 #define TGL_PW_2_POWER_DOMAINS (			\
2748 	TGL_PW_3_POWER_DOMAINS |			\
2749 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
2750 	BIT_ULL(POWER_DOMAIN_INIT))
2751 
2752 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2753 	TGL_PW_3_POWER_DOMAINS |			\
2754 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2755 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2756 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2757 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2758 	BIT_ULL(POWER_DOMAIN_INIT))
2759 
2760 #define TGL_DDI_IO_D_TC1_POWER_DOMAINS (	\
2761 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2762 #define TGL_DDI_IO_E_TC2_POWER_DOMAINS (	\
2763 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2764 #define TGL_DDI_IO_F_TC3_POWER_DOMAINS (	\
2765 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2766 #define TGL_DDI_IO_G_TC4_POWER_DOMAINS (	\
2767 	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
2768 #define TGL_DDI_IO_H_TC5_POWER_DOMAINS (	\
2769 	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
2770 #define TGL_DDI_IO_I_TC6_POWER_DOMAINS (	\
2771 	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
2772 
2773 #define TGL_AUX_A_IO_POWER_DOMAINS (		\
2774 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
2775 	BIT_ULL(POWER_DOMAIN_AUX_A))
2776 #define TGL_AUX_B_IO_POWER_DOMAINS (		\
2777 	BIT_ULL(POWER_DOMAIN_AUX_B))
2778 #define TGL_AUX_C_IO_POWER_DOMAINS (		\
2779 	BIT_ULL(POWER_DOMAIN_AUX_C))
2780 #define TGL_AUX_D_TC1_IO_POWER_DOMAINS (	\
2781 	BIT_ULL(POWER_DOMAIN_AUX_D))
2782 #define TGL_AUX_E_TC2_IO_POWER_DOMAINS (	\
2783 	BIT_ULL(POWER_DOMAIN_AUX_E))
2784 #define TGL_AUX_F_TC3_IO_POWER_DOMAINS (	\
2785 	BIT_ULL(POWER_DOMAIN_AUX_F))
2786 #define TGL_AUX_G_TC4_IO_POWER_DOMAINS (	\
2787 	BIT_ULL(POWER_DOMAIN_AUX_G))
2788 #define TGL_AUX_H_TC5_IO_POWER_DOMAINS (	\
2789 	BIT_ULL(POWER_DOMAIN_AUX_H))
2790 #define TGL_AUX_I_TC6_IO_POWER_DOMAINS (	\
2791 	BIT_ULL(POWER_DOMAIN_AUX_I))
2792 #define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (	\
2793 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2794 #define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (	\
2795 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2796 #define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (	\
2797 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2798 #define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (	\
2799 	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
2800 #define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (	\
2801 	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
2802 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
2803 	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
2804 
2805 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2806 	.sync_hw = i9xx_power_well_sync_hw_noop,
2807 	.enable = i9xx_always_on_power_well_noop,
2808 	.disable = i9xx_always_on_power_well_noop,
2809 	.is_enabled = i9xx_always_on_power_well_enabled,
2810 };
2811 
2812 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2813 	.sync_hw = chv_pipe_power_well_sync_hw,
2814 	.enable = chv_pipe_power_well_enable,
2815 	.disable = chv_pipe_power_well_disable,
2816 	.is_enabled = chv_pipe_power_well_enabled,
2817 };
2818 
2819 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2820 	.sync_hw = i9xx_power_well_sync_hw_noop,
2821 	.enable = chv_dpio_cmn_power_well_enable,
2822 	.disable = chv_dpio_cmn_power_well_disable,
2823 	.is_enabled = vlv_power_well_enabled,
2824 };
2825 
2826 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2827 	{
2828 		.name = "always-on",
2829 		.always_on = true,
2830 		.domains = POWER_DOMAIN_MASK,
2831 		.ops = &i9xx_always_on_power_well_ops,
2832 		.id = DISP_PW_ID_NONE,
2833 	},
2834 };
2835 
2836 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2837 	.sync_hw = i830_pipes_power_well_sync_hw,
2838 	.enable = i830_pipes_power_well_enable,
2839 	.disable = i830_pipes_power_well_disable,
2840 	.is_enabled = i830_pipes_power_well_enabled,
2841 };
2842 
2843 static const struct i915_power_well_desc i830_power_wells[] = {
2844 	{
2845 		.name = "always-on",
2846 		.always_on = true,
2847 		.domains = POWER_DOMAIN_MASK,
2848 		.ops = &i9xx_always_on_power_well_ops,
2849 		.id = DISP_PW_ID_NONE,
2850 	},
2851 	{
2852 		.name = "pipes",
2853 		.domains = I830_PIPES_POWER_DOMAINS,
2854 		.ops = &i830_pipes_power_well_ops,
2855 		.id = DISP_PW_ID_NONE,
2856 	},
2857 };
2858 
2859 static const struct i915_power_well_ops hsw_power_well_ops = {
2860 	.sync_hw = hsw_power_well_sync_hw,
2861 	.enable = hsw_power_well_enable,
2862 	.disable = hsw_power_well_disable,
2863 	.is_enabled = hsw_power_well_enabled,
2864 };
2865 
2866 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2867 	.sync_hw = i9xx_power_well_sync_hw_noop,
2868 	.enable = gen9_dc_off_power_well_enable,
2869 	.disable = gen9_dc_off_power_well_disable,
2870 	.is_enabled = gen9_dc_off_power_well_enabled,
2871 };
2872 
2873 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2874 	.sync_hw = i9xx_power_well_sync_hw_noop,
2875 	.enable = bxt_dpio_cmn_power_well_enable,
2876 	.disable = bxt_dpio_cmn_power_well_disable,
2877 	.is_enabled = bxt_dpio_cmn_power_well_enabled,
2878 };
2879 
2880 static const struct i915_power_well_regs hsw_power_well_regs = {
2881 	.bios	= HSW_PWR_WELL_CTL1,
2882 	.driver	= HSW_PWR_WELL_CTL2,
2883 	.kvmr	= HSW_PWR_WELL_CTL3,
2884 	.debug	= HSW_PWR_WELL_CTL4,
2885 };
2886 
2887 static const struct i915_power_well_desc hsw_power_wells[] = {
2888 	{
2889 		.name = "always-on",
2890 		.always_on = true,
2891 		.domains = POWER_DOMAIN_MASK,
2892 		.ops = &i9xx_always_on_power_well_ops,
2893 		.id = DISP_PW_ID_NONE,
2894 	},
2895 	{
2896 		.name = "display",
2897 		.domains = HSW_DISPLAY_POWER_DOMAINS,
2898 		.ops = &hsw_power_well_ops,
2899 		.id = HSW_DISP_PW_GLOBAL,
2900 		{
2901 			.hsw.regs = &hsw_power_well_regs,
2902 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2903 			.hsw.has_vga = true,
2904 		},
2905 	},
2906 };
2907 
2908 static const struct i915_power_well_desc bdw_power_wells[] = {
2909 	{
2910 		.name = "always-on",
2911 		.always_on = true,
2912 		.domains = POWER_DOMAIN_MASK,
2913 		.ops = &i9xx_always_on_power_well_ops,
2914 		.id = DISP_PW_ID_NONE,
2915 	},
2916 	{
2917 		.name = "display",
2918 		.domains = BDW_DISPLAY_POWER_DOMAINS,
2919 		.ops = &hsw_power_well_ops,
2920 		.id = HSW_DISP_PW_GLOBAL,
2921 		{
2922 			.hsw.regs = &hsw_power_well_regs,
2923 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2924 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2925 			.hsw.has_vga = true,
2926 		},
2927 	},
2928 };
2929 
2930 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2931 	.sync_hw = i9xx_power_well_sync_hw_noop,
2932 	.enable = vlv_display_power_well_enable,
2933 	.disable = vlv_display_power_well_disable,
2934 	.is_enabled = vlv_power_well_enabled,
2935 };
2936 
2937 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2938 	.sync_hw = i9xx_power_well_sync_hw_noop,
2939 	.enable = vlv_dpio_cmn_power_well_enable,
2940 	.disable = vlv_dpio_cmn_power_well_disable,
2941 	.is_enabled = vlv_power_well_enabled,
2942 };
2943 
2944 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2945 	.sync_hw = i9xx_power_well_sync_hw_noop,
2946 	.enable = vlv_power_well_enable,
2947 	.disable = vlv_power_well_disable,
2948 	.is_enabled = vlv_power_well_enabled,
2949 };
2950 
2951 static const struct i915_power_well_desc vlv_power_wells[] = {
2952 	{
2953 		.name = "always-on",
2954 		.always_on = true,
2955 		.domains = POWER_DOMAIN_MASK,
2956 		.ops = &i9xx_always_on_power_well_ops,
2957 		.id = DISP_PW_ID_NONE,
2958 	},
2959 	{
2960 		.name = "display",
2961 		.domains = VLV_DISPLAY_POWER_DOMAINS,
2962 		.ops = &vlv_display_power_well_ops,
2963 		.id = VLV_DISP_PW_DISP2D,
2964 		{
2965 			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2966 		},
2967 	},
2968 	{
2969 		.name = "dpio-tx-b-01",
2970 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2971 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2972 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2973 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2974 		.ops = &vlv_dpio_power_well_ops,
2975 		.id = DISP_PW_ID_NONE,
2976 		{
2977 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2978 		},
2979 	},
2980 	{
2981 		.name = "dpio-tx-b-23",
2982 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2983 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2984 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2985 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2986 		.ops = &vlv_dpio_power_well_ops,
2987 		.id = DISP_PW_ID_NONE,
2988 		{
2989 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2990 		},
2991 	},
2992 	{
2993 		.name = "dpio-tx-c-01",
2994 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2995 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2996 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2997 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2998 		.ops = &vlv_dpio_power_well_ops,
2999 		.id = DISP_PW_ID_NONE,
3000 		{
3001 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3002 		},
3003 	},
3004 	{
3005 		.name = "dpio-tx-c-23",
3006 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3007 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3008 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3009 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3010 		.ops = &vlv_dpio_power_well_ops,
3011 		.id = DISP_PW_ID_NONE,
3012 		{
3013 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3014 		},
3015 	},
3016 	{
3017 		.name = "dpio-common",
3018 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3019 		.ops = &vlv_dpio_cmn_power_well_ops,
3020 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3021 		{
3022 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3023 		},
3024 	},
3025 };
3026 
3027 static const struct i915_power_well_desc chv_power_wells[] = {
3028 	{
3029 		.name = "always-on",
3030 		.always_on = true,
3031 		.domains = POWER_DOMAIN_MASK,
3032 		.ops = &i9xx_always_on_power_well_ops,
3033 		.id = DISP_PW_ID_NONE,
3034 	},
3035 	{
3036 		.name = "display",
3037 		/*
3038 		 * Pipe A power well is the new disp2d well. Pipe B and C
3039 		 * power wells don't actually exist. Pipe A power well is
3040 		 * required for any pipe to work.
3041 		 */
3042 		.domains = CHV_DISPLAY_POWER_DOMAINS,
3043 		.ops = &chv_pipe_power_well_ops,
3044 		.id = DISP_PW_ID_NONE,
3045 	},
3046 	{
3047 		.name = "dpio-common-bc",
3048 		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3049 		.ops = &chv_dpio_cmn_power_well_ops,
3050 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3051 		{
3052 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3053 		},
3054 	},
3055 	{
3056 		.name = "dpio-common-d",
3057 		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3058 		.ops = &chv_dpio_cmn_power_well_ops,
3059 		.id = CHV_DISP_PW_DPIO_CMN_D,
3060 		{
3061 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3062 		},
3063 	},
3064 };
3065 
3066 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3067 					 enum i915_power_well_id power_well_id)
3068 {
3069 	struct i915_power_well *power_well;
3070 	bool ret;
3071 
3072 	power_well = lookup_power_well(dev_priv, power_well_id);
3073 	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3074 
3075 	return ret;
3076 }
3077 
3078 static const struct i915_power_well_desc skl_power_wells[] = {
3079 	{
3080 		.name = "always-on",
3081 		.always_on = true,
3082 		.domains = POWER_DOMAIN_MASK,
3083 		.ops = &i9xx_always_on_power_well_ops,
3084 		.id = DISP_PW_ID_NONE,
3085 	},
3086 	{
3087 		.name = "power well 1",
3088 		/* Handled by the DMC firmware */
3089 		.always_on = true,
3090 		.domains = 0,
3091 		.ops = &hsw_power_well_ops,
3092 		.id = SKL_DISP_PW_1,
3093 		{
3094 			.hsw.regs = &hsw_power_well_regs,
3095 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3096 			.hsw.has_fuses = true,
3097 		},
3098 	},
3099 	{
3100 		.name = "MISC IO power well",
3101 		/* Handled by the DMC firmware */
3102 		.always_on = true,
3103 		.domains = 0,
3104 		.ops = &hsw_power_well_ops,
3105 		.id = SKL_DISP_PW_MISC_IO,
3106 		{
3107 			.hsw.regs = &hsw_power_well_regs,
3108 			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3109 		},
3110 	},
3111 	{
3112 		.name = "DC off",
3113 		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3114 		.ops = &gen9_dc_off_power_well_ops,
3115 		.id = SKL_DISP_DC_OFF,
3116 	},
3117 	{
3118 		.name = "power well 2",
3119 		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3120 		.ops = &hsw_power_well_ops,
3121 		.id = SKL_DISP_PW_2,
3122 		{
3123 			.hsw.regs = &hsw_power_well_regs,
3124 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3125 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3126 			.hsw.has_vga = true,
3127 			.hsw.has_fuses = true,
3128 		},
3129 	},
3130 	{
3131 		.name = "DDI A/E IO power well",
3132 		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3133 		.ops = &hsw_power_well_ops,
3134 		.id = DISP_PW_ID_NONE,
3135 		{
3136 			.hsw.regs = &hsw_power_well_regs,
3137 			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3138 		},
3139 	},
3140 	{
3141 		.name = "DDI B IO power well",
3142 		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3143 		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};

static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};

static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
};

static const struct i915_power_well_desc cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
		},
	},
};

static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_combo_phy_aux_power_well_enable,
	.disable = icl_combo_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_tc_phy_aux_power_well_enable,
	.disable = icl_tc_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_AUX1,
	.driver	= ICL_PWR_WELL_CTL_AUX2,
	.debug	= ICL_PWR_WELL_CTL_AUX4,
};

static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_DDI1,
	.driver	= ICL_PWR_WELL_CTL_DDI2,
	.debug	= ICL_PWR_WELL_CTL_DDI4,
};

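/* ICL+ track the DDI and AUX IO wells in their own PWR_WELL_CTL registers. */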
static const struct i915_power_well_desc icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
		},
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C TC1",
		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D TC2",
		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E TC3",
		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F TC4",
		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX C TBT1",
		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX D TBT2",
		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX E TBT3",
		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX F TBT4",
		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};

static const struct i915_power_well_desc ehl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};

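/* TGL has up to six Type-C ports, each with its own DDI IO and AUX TC/TBT wells. */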
static const struct i915_power_well_desc tgl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = TGL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = TGL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = TGL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D TC1 IO",
		.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		},
	},
	{
		.name = "DDI E TC2 IO",
		.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		},
	},
	{
		.name = "DDI F TC3 IO",
		.domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
		},
	},
	{
		.name = "DDI G TC4 IO",
		.domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
		},
	},
	{
		.name = "DDI H TC5 IO",
		.domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
		},
	},
	{
		.name = "DDI I TC6 IO",
		.domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
		},
	},
	{
		.name = "AUX A",
		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D TC1",
		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E TC2",
		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F TC3",
		.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX G TC4",
		.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX H TC5",
		.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX I TC6",
		.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D TBT1",
		.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX E TBT2",
		.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX F TBT3",
		.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX G TBT4",
		.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX H TBT5",
		.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX I TBT6",
		.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = TGL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
	{
		.name = "power well 5",
		.domains = TGL_PW_5_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_D),
		},
	},
};

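/*
 * Resolve the disable_power_well module parameter: a negative value means
 * "use the default", which is to allow the power wells to be disabled.
 */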
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

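/*
 * Map the enable_dc module parameter to a mask of allowed DC states,
 * clamped to the deepest DC state the platform supports. DC9 is handled
 * separately since it doesn't depend on the DMC firmware.
 */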
static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (INTEL_GEN(dev_priv) >= 12) {
		max_dc = 4;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else if (IS_GEN(dev_priv, 11)) {
		max_dc = 2;
		mask = DC_STATE_EN_DC9;
	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_GEN9_LP(dev_priv)) {
		max_dc = 1;
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	if (!i915_modparams.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

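	/* Translate the sanitized DC level into DC_STATE_EN_* bits. */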
	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}

static int
__set_power_wells(struct i915_power_domains *power_domains,
		  const struct i915_power_well_desc *power_well_descs,
		  int power_well_count)
{
	u64 power_well_ids = 0;
	int i;

	power_domains->power_well_count = power_well_count;
	power_domains->power_wells =
				kcalloc(power_well_count,
					sizeof(*power_domains->power_wells),
					GFP_KERNEL);
	if (!power_domains->power_wells)
		return -ENOMEM;

	for (i = 0; i < power_well_count; i++) {
		enum i915_power_well_id id = power_well_descs[i].id;

		power_domains->power_wells[i].desc = &power_well_descs[i];

		if (id == DISP_PW_ID_NONE)
			continue;

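		/*
		 * Wells with a real ID must be unique; the IDs double as bit
		 * positions in the u64 tracking mask.
		 */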
		WARN_ON(id >= sizeof(power_well_ids) * 8);
		WARN_ON(power_well_ids & BIT_ULL(id));
		power_well_ids |= BIT_ULL(id);
	}

	return 0;
}

#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int err;

	i915_modparams.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   i915_modparams.disable_power_well);
	dev_priv->csr.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);

	dev_priv->csr.target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	rw_init(&power_domains->lock, "ipdl");

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_GEN(dev_priv, 12)) {
		err = set_power_wells(power_domains, tgl_power_wells);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		err = set_power_wells(power_domains, ehl_power_wells);
	} else if (IS_GEN(dev_priv, 11)) {
		err = set_power_wells(power_domains, icl_power_wells);
	} else if (IS_CANNONLAKE(dev_priv)) {
		err = set_power_wells(power_domains, cnl_power_wells);

		/*
		 * DDI and AUX IO are getting enabled for all ports
		 * regardless of their presence or use, so to avoid
		 * timeouts let's remove the port F wells from the list
		 * on SKUs without port F.
		 */
		if (!IS_CNL_WITH_PORT_F(dev_priv))
			power_domains->power_well_count -= 2;
	} else if (IS_GEMINILAKE(dev_priv)) {
		err = set_power_wells(power_domains, glk_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		err = set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_GEN9_BC(dev_priv)) {
		err = set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, chv_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		err = set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_HASWELL(dev_priv)) {
		err = set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		err = set_power_wells(power_domains, i830_power_wells);
	} else {
		err = set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return err;
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	kfree(dev_priv->power_domains.power_wells);
}

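/*
 * Sync the driver's power well state tracking with whatever state the
 * BIOS/firmware left the hardware in.
 */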
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->desc->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled =
			power_well->desc->ops->is_enabled(dev_priv, power_well);
	}
	mutex_unlock(&power_domains->lock);
}

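/*
 * Request a DBUF slice power state change and verify that the status bit
 * follows the request after giving the hardware ~10 us to settle; returns
 * false on timeout.
 */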
static inline
bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
			  i915_reg_t reg, bool enable)
{
	u32 val, status;

	val = intel_de_read(dev_priv, reg);
	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	if ((enable && !status) || (!enable && status)) {
		drm_err(&dev_priv->drm, "DBuf power %s timeout!\n",
			enable ? "enable" : "disable");
		return false;
	}
	return true;
}

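/* The pre-ICL display core init paths only use DBUF slice S1. */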
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, 0);
}

void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
			    u8 req_slices)
{
	int i;
	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
		 "Invalid number of dbuf slices requested\n");

	DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);

	/*
	 * This might run in parallel with gen9_dc_off_power_well_enable()
	 * (called e.g. from intel_dp_detect()), which can trigger an
	 * assertion failure: gen9_assert_dbuf_enabled() may observe the
	 * registers already updated while dev_priv's bookkeeping is not.
	 * Hold the power domains lock to avoid that race.
	 */
	mutex_lock(&power_domains->lock);

	for (i = 0; i < max_slices; i++) {
		intel_dbuf_slice_set(dev_priv,
				     DBUF_CTL_S(i),
				     (req_slices & BIT(i)) != 0);
	}

	dev_priv->enabled_dbuf_slices_mask = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	skl_ddb_get_hw_state(dev_priv);
	/*
	 * Just power up at least one slice; we'll figure out later which
	 * slices we have and which we actually need.
	 */
	icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
			       BIT(DBUF_S1));
}

static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, 0);
}

static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	u32 mask, val;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
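	/* Gen12 has two additional ABOX units that need the same credit setup. */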
	if (INTEL_GEN(dev_priv) >= 12) {
		intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
		intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
	}
}

static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * LCPLL should have been enabled by the BIOS. For now just check
	 * its state and print errors if something is wrong; don't even
	 * try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, D_COMP_HSW);
	else
		return intel_de_read(dev_priv, D_COMP_BDW);
}

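/*
 * On HSW, D_COMP must be written through the pcode mailbox; BDW writes
 * the register directly.
 */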
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (IS_HASWELL(dev_priv)) {
		if (sandybridge_pcode_write(dev_priv,
					    GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to write to D_COMP\n");
	} else {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

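	/* Optionally move CDCLK onto FCLK so it survives the LCPLL shutdown. */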
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not in the PC8 state while touching the hardware
	 * below, otherwise we'll hang the machine. Taking a forcewake
	 * reference is enough to keep the device out of PC8.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

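	/* Switch the CDCLK source back from FCLK to LCPLL if needed. */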
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we handle the
 * remaining steps manually: disable the interrupts and clocks, and switch the
 * LCPLL refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt,
 * we can hard hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" in the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}
}

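/*
 * IVB uses GEN7_MSG_CTL for the PCH FLR/reset handshake, later platforms
 * use HSW_NDE_RSTWRN_OPT.
 */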
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	val = intel_de_read(dev_priv, reg);

	if (enable)
		val |= reset_bits;
	else
		val &= ~reset_bits;

	intel_de_write(dev_priv, reg, val);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1 (PG1).
	 * Note that even though the driver's request is removed, power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT,
	 * or else the reset will hang because there is no PCH to respond.
	 * The handshake programming is done here in the init sequence;
	 * previously it was left up to the BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed, power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}

static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. */
	intel_combo_phy_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	intel_cdclk_init_hw(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}

struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{}
};

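/* Wa_1409767108: alternate page masks for TGL A0, see tgl_bw_buddy_init(). */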
static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{}
};

static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	int i;

	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
		/* Wa_1409767108: tgl */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

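	/*
	 * Find the entry matching this DRAM configuration; the tables are
	 * terminated by an all-zero sentinel.
	 */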
	for (i = 0; table[i].page_mask != 0; i++)
		if (table[i].num_channels == num_channels &&
		    table[i].type == type)
			break;

	if (table[i].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
		intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
	} else {
		intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
			       table[i].page_mask);
		intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
			       table[i].page_mask);

		/* Wa_22010178259:tgl */
		intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
			     BW_BUDDY_TLB_REQ_TIMER_MASK,
			     REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
		intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
			     BW_BUDDY_TLB_REQ_TIMER_MASK,
			     REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
	}
}

static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	/* 5. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (INTEL_GEN(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

5145 	/*
5146 	 * If all lanes are disabled we leave the override disabled
5147 	 * with all power down bits cleared to match the state we
5148 	 * would use after disabling the port. Otherwise enable the
5149 	 * override and set the lane powerdown bits accding to the
5150 	 * current lane status.
5151 	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

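		/*
		 * A fully powered up channel reads back 0xf in its ready
		 * mask, i.e. no override is needed; any other value
		 * enables the override with the current lane status
		 * latched into the powerdown bits.
		 */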
		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might already be active, skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}

static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
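	/*
	 * Punit registers are not memory mapped; reads go through the
	 * IOSF sideband, which vlv_punit_get()/vlv_punit_put() bracket.
	 */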
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
#ifdef notyet
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
#endif
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: true if called from a resume code path
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
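 *
 * A typical driver load/unload flow pairs these calls roughly as follows
 * (sketch, error handling omitted):
 *
 *	intel_power_domains_init_hw(i915, false);
 *	...
 *	intel_power_domains_enable(i915);
 *	...
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_driver_remove(i915);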
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	if (INTEL_GEN(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_CANNONLAKE(i915)) {
		cnl_display_core_init(i915, resume);
	} else if (IS_GEN9_BC(i915)) {
		skl_display_core_init(i915, resume);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/*
	 * If the user disabled power well support, take an extra INIT
	 * reference so the power wells are never powered down.
	 */
	if (!i915_modparams.disable_power_well)
		intel_display_power_get(i915, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}

/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915_modparams.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the on-demand enabling/disabling of the display power wells. Note
 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
 * toggled only at specific points of the display modeset sequence, thus they
 * are not affected by the intel_power_domains_enable()/disable() calls. The
 * purpose of these functions is to keep the rest of the power wells enabled
 * until the end of display HW readout (which will acquire the power
 * references reflecting the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the on-demand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	drm_WARN_ON(&i915->drm, power_domains->wakeref);
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
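 *
 * A typical system suspend/resume flow is roughly (sketch, assuming a
 * suspend-to-mem target):
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	...
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);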
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support, don't manually deinit the power domains. This also means the
	 * CSR/DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    i915->csr.dmc_payload) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915_modparams.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	if (INTEL_GEN(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_CANNONLAKE(i915))
		cnl_display_core_uninit(i915);
	else if (IS_GEN9_BC(i915))
		skl_display_core_uninit(i915);
	else if (IS_GEN9_LP(i915))
		bxt_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}

/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(&i915->drm, power_domains->wakeref);
		power_domains->wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg(&i915->drm, "%-25s %d\n",
			power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			drm_dbg(&i915->drm, "  %-23s %d\n",
				intel_display_power_domain_str(domain),
				power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify that the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = power_well->desc->ops->is_enabled(i915, power_well);
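		/*
		 * A power well should be enabled exactly when it holds
		 * references or is marked always-on.
		 */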
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			drm_err(&i915->drm,
				"power well %s state mismatch (refcount %d/enabled %d)\n",
				power_well->desc->name,
				power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			drm_err(&i915->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				power_well->desc->name, power_well->count,
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif

void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
		bxt_enable_dc9(i915);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		hsw_enable_pc8(i915);
}

void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}

void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

void intel_display_power_resume(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
		if (i915->csr.dmc_payload) {
			if (i915->csr.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (i915->csr.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEN9_LP(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (i915->csr.dmc_payload &&
		    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}