xref: /dragonfly/sys/dev/drm/i915/intel_dp.c (revision 6e316fcd)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27 
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <linux/notifier.h>
31 #include <linux/reboot.h>
32 #include <drm/drmP.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_crtc.h>
35 #include <drm/drm_crtc_helper.h>
36 #include <drm/drm_edid.h>
37 #include "intel_drv.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 
41 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
42 
43 static int disable_aux_irq = 0;
44 TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
45 
46 /* Compliance test status bits  */
47 #define INTEL_DP_RESOLUTION_SHIFT_MASK	0
48 #define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 #define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
51 
52 struct dp_link_dpll {
53 	int clock;
54 	struct dpll dpll;
55 };
56 
57 static const struct dp_link_dpll gen4_dpll[] = {
58 	{ 162000,
59 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
60 	{ 270000,
61 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
62 };
63 
64 static const struct dp_link_dpll pch_dpll[] = {
65 	{ 162000,
66 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
67 	{ 270000,
68 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
69 };
70 
71 static const struct dp_link_dpll vlv_dpll[] = {
72 	{ 162000,
73 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
74 	{ 270000,
75 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
76 };
77 
78 /*
79  * CHV supports eDP 1.4, which has more link rates.
80  * Only the fixed rates are provided below; variable rates are excluded.
81  */
82 static const struct dp_link_dpll chv_dpll[] = {
83 	/*
84 	 * CHV requires programming a fractional division for m2.
85 	 * m2 is stored in fixed point format using the formula below:
86 	 * (m2_int << 22) | m2_fraction
87 	 */
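	/* e.g. (32 << 22) | 1677722 == 0x819999a for the 162000 entry below */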
88 	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
89 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
90 	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
91 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
92 	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
93 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
94 };
95 
96 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
97 				  324000, 432000, 540000 };
98 static const int skl_rates[] = { 162000, 216000, 270000,
99 				  324000, 432000, 540000 };
100 static const int default_rates[] = { 162000, 270000, 540000 };
101 
102 /**
103  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104  * @intel_dp: DP struct
105  *
106  * If a CPU or PCH DP output is attached to an eDP panel, this function
107  * will return true, and false otherwise.
108  */
109 static bool is_edp(struct intel_dp *intel_dp)
110 {
111 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112 
113 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
114 }
115 
116 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
117 {
118 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
119 
120 	return intel_dig_port->base.base.dev;
121 }
122 
123 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
124 {
125 	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
126 }
127 
128 static void intel_dp_link_down(struct intel_dp *intel_dp);
129 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
130 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
131 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
132 static void vlv_steal_power_sequencer(struct drm_device *dev,
133 				      enum i915_pipe pipe);
134 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
135 
136 static unsigned int intel_dp_unused_lane_mask(int lane_count)
137 {
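	/* e.g. lane_count == 2 -> ~0b0011 & 0xf == 0b1100: lanes 2 and 3 unused */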
138 	return ~((1 << lane_count) - 1) & 0xf;
139 }
140 
141 static int
142 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
143 {
144 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
145 
146 	switch (max_link_bw) {
147 	case DP_LINK_BW_1_62:
148 	case DP_LINK_BW_2_7:
149 	case DP_LINK_BW_5_4:
150 		break;
151 	default:
152 		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
153 		     max_link_bw);
154 		max_link_bw = DP_LINK_BW_1_62;
155 		break;
156 	}
157 	return max_link_bw;
158 }
159 
160 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
161 {
162 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
163 	u8 source_max, sink_max;
164 
165 	source_max = intel_dig_port->max_lanes;
166 	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
167 
168 	return min(source_max, sink_max);
169 }
170 
171 /*
172  * The units on the numbers in the next two are... bizarre.  Examples will
173  * make it clearer; this one parallels an example in the eDP spec.
174  *
175  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
176  *
177  *     270000 * 1 * 8 / 10 == 216000
178  *
179  * The actual data capacity of that configuration is 2.16Gbit/s, so the
180  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
181  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
182  * 119000.  At 18bpp that's 2142000 kilobits per second, i.e. 214200 decakilobits - just under the 216000 above.
183  *
184  * Thus the strange-looking division by 10 in intel_dp_link_required, to
185  * get the result in decakilobits instead of kilobits.
186  */
187 
188 static int
189 intel_dp_link_required(int pixel_clock, int bpp)
190 {
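	/* the +9 makes the division by 10 round up */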
191 	return (pixel_clock * bpp + 9) / 10;
192 }
193 
194 static int
195 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
196 {
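	/* the 8/10 factor accounts for 8b/10b channel coding overhead */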
197 	return (max_link_clock * max_lanes * 8) / 10;
198 }
199 
200 static enum drm_mode_status
201 intel_dp_mode_valid(struct drm_connector *connector,
202 		    struct drm_display_mode *mode)
203 {
204 	struct intel_dp *intel_dp = intel_attached_dp(connector);
205 	struct intel_connector *intel_connector = to_intel_connector(connector);
206 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
207 	int target_clock = mode->clock;
208 	int max_rate, mode_rate, max_lanes, max_link_clock;
209 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
210 
211 	if (is_edp(intel_dp) && fixed_mode) {
212 		if (mode->hdisplay > fixed_mode->hdisplay)
213 			return MODE_PANEL;
214 
215 		if (mode->vdisplay > fixed_mode->vdisplay)
216 			return MODE_PANEL;
217 
218 		target_clock = fixed_mode->clock;
219 	}
220 
221 	max_link_clock = intel_dp_max_link_rate(intel_dp);
222 	max_lanes = intel_dp_max_lane_count(intel_dp);
223 
224 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
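	/* check feasibility at the lowest bpp we ever accept: 6 bpc (18 bpp) */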
225 	mode_rate = intel_dp_link_required(target_clock, 18);
226 
227 	if (mode_rate > max_rate || target_clock > max_dotclk)
228 		return MODE_CLOCK_HIGH;
229 
230 	if (mode->clock < 10000)
231 		return MODE_CLOCK_LOW;
232 
233 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
234 		return MODE_H_ILLEGAL;
235 
236 	return MODE_OK;
237 }
238 
239 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
240 {
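	/* packs up to 4 bytes MSB-first, e.g. {0x12, 0x34, 0x56, 0x78} -> 0x12345678 */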
241 	int	i;
242 	uint32_t v = 0;
243 
244 	if (src_bytes > 4)
245 		src_bytes = 4;
246 	for (i = 0; i < src_bytes; i++)
247 		v |= ((uint32_t) src[i]) << ((3-i) * 8);
248 	return v;
249 }
250 
251 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
252 {
253 	int i;
254 	if (dst_bytes > 4)
255 		dst_bytes = 4;
256 	for (i = 0; i < dst_bytes; i++)
257 		dst[i] = src >> ((3-i) * 8);
258 }
259 
260 static void
261 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
262 				    struct intel_dp *intel_dp);
263 static void
264 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
265 					      struct intel_dp *intel_dp);
266 
267 static void pps_lock(struct intel_dp *intel_dp)
268 {
269 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
270 	struct intel_encoder *encoder = &intel_dig_port->base;
271 	struct drm_device *dev = encoder->base.dev;
272 	struct drm_i915_private *dev_priv = dev->dev_private;
273 	enum intel_display_power_domain power_domain;
274 
275 	/*
276 	 * See vlv_power_sequencer_reset() for why we need
277 	 * a power domain reference here.
278 	 */
279 	power_domain = intel_display_port_aux_power_domain(encoder);
280 	intel_display_power_get(dev_priv, power_domain);
281 
282 	mutex_lock(&dev_priv->pps_mutex);
283 }
284 
285 static void pps_unlock(struct intel_dp *intel_dp)
286 {
287 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
288 	struct intel_encoder *encoder = &intel_dig_port->base;
289 	struct drm_device *dev = encoder->base.dev;
290 	struct drm_i915_private *dev_priv = dev->dev_private;
291 	enum intel_display_power_domain power_domain;
292 
293 	mutex_unlock(&dev_priv->pps_mutex);
294 
295 	power_domain = intel_display_port_aux_power_domain(encoder);
296 	intel_display_power_put(dev_priv, power_domain);
297 }
298 
299 static void
300 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
301 {
302 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
303 	struct drm_device *dev = intel_dig_port->base.base.dev;
304 	struct drm_i915_private *dev_priv = dev->dev_private;
305 	enum i915_pipe pipe = intel_dp->pps_pipe;
306 	bool pll_enabled, release_cl_override = false;
307 	enum dpio_phy phy = DPIO_PHY(pipe);
308 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
309 	uint32_t DP;
310 
311 	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
312 		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
313 		 pipe_name(pipe), port_name(intel_dig_port->port)))
314 		return;
315 
316 	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
317 		      pipe_name(pipe), port_name(intel_dig_port->port));
318 
319 	/* Preserve the BIOS-computed detected bit. This is
320 	 * supposed to be read-only.
321 	 */
322 	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
323 	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
324 	DP |= DP_PORT_WIDTH(1);
325 	DP |= DP_LINK_TRAIN_PAT_1;
326 
327 	if (IS_CHERRYVIEW(dev))
328 		DP |= DP_PIPE_SELECT_CHV(pipe);
329 	else if (pipe == PIPE_B)
330 		DP |= DP_PIPEB_SELECT;
331 
332 	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
333 
334 	/*
335 	 * The DPLL for the pipe must be enabled for this to work.
336 	 * So enable it temporarily if it's not already enabled.
337 	 */
338 	if (!pll_enabled) {
339 		release_cl_override = IS_CHERRYVIEW(dev) &&
340 			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
341 
342 		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
343 				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
344 			DRM_ERROR("Failed to force on pll for pipe %c!\n",
345 				  pipe_name(pipe));
346 			return;
347 		}
348 	}
349 
350 	/*
351 	 * Similar magic as in intel_dp_enable_port().
352 	 * We _must_ do this port enable + disable trick
353 	 * to make this power sequencer lock onto the port.
354 	 * Otherwise even the VDD force bit won't work.
355 	 */
356 	I915_WRITE(intel_dp->output_reg, DP);
357 	POSTING_READ(intel_dp->output_reg);
358 
359 	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
360 	POSTING_READ(intel_dp->output_reg);
361 
362 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
363 	POSTING_READ(intel_dp->output_reg);
364 
365 	if (!pll_enabled) {
366 		vlv_force_pll_off(dev, pipe);
367 
368 		if (release_cl_override)
369 			chv_phy_powergate_ch(dev_priv, phy, ch, false);
370 	}
371 }
372 
373 static enum i915_pipe
374 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
375 {
376 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
377 	struct drm_device *dev = intel_dig_port->base.base.dev;
378 	struct drm_i915_private *dev_priv = dev->dev_private;
379 	struct intel_encoder *encoder;
380 	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
381 	enum i915_pipe pipe;
382 
383 	lockdep_assert_held(&dev_priv->pps_mutex);
384 
385 	/* We should never land here with regular DP ports */
386 	WARN_ON(!is_edp(intel_dp));
387 
388 	if (intel_dp->pps_pipe != INVALID_PIPE)
389 		return intel_dp->pps_pipe;
390 
391 	/*
392 	 * We don't have a power sequencer currently.
393 	 * Pick one that's not used by other ports.
394 	 */
395 	for_each_intel_encoder(dev, encoder) {
396 		struct intel_dp *tmp;
397 
398 		if (encoder->type != INTEL_OUTPUT_EDP)
399 			continue;
400 
401 		tmp = enc_to_intel_dp(&encoder->base);
402 
403 		if (tmp->pps_pipe != INVALID_PIPE)
404 			pipes &= ~(1 << tmp->pps_pipe);
405 	}
406 
407 	/*
408 	 * Didn't find one. This should not happen since there
409 	 * are two power sequencers and up to two eDP ports.
410 	 */
411 	if (WARN_ON(pipes == 0))
412 		pipe = PIPE_A;
413 	else
414 		pipe = ffs(pipes) - 1;
415 
416 	vlv_steal_power_sequencer(dev, pipe);
417 	intel_dp->pps_pipe = pipe;
418 
419 	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
420 		      pipe_name(intel_dp->pps_pipe),
421 		      port_name(intel_dig_port->port));
422 
423 	/* init power sequencer on this pipe and port */
424 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
425 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
426 
427 	/*
428 	 * Even vdd force doesn't work until we've made
429 	 * the power sequencer lock onto the port.
430 	 */
431 	vlv_power_sequencer_kick(intel_dp);
432 
433 	return intel_dp->pps_pipe;
434 }
435 
436 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
437 			       enum i915_pipe pipe);
438 
439 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
440 			       enum i915_pipe pipe)
441 {
442 	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
443 }
444 
445 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
446 				enum i915_pipe pipe)
447 {
448 	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
449 }
450 
451 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
452 			 enum i915_pipe pipe)
453 {
454 	return true;
455 }
456 
457 static enum i915_pipe
458 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
459 		     enum port port,
460 		     vlv_pipe_check pipe_check)
461 {
462 	enum i915_pipe pipe;
463 
464 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
465 		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
466 			PANEL_PORT_SELECT_MASK;
467 
468 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
469 			continue;
470 
471 		if (!pipe_check(dev_priv, pipe))
472 			continue;
473 
474 		return pipe;
475 	}
476 
477 	return INVALID_PIPE;
478 }
479 
480 static void
481 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
482 {
483 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
484 	struct drm_device *dev = intel_dig_port->base.base.dev;
485 	struct drm_i915_private *dev_priv = dev->dev_private;
486 	enum port port = intel_dig_port->port;
487 
488 	lockdep_assert_held(&dev_priv->pps_mutex);
489 
490 	/* try to find a pipe with this port selected */
491 	/* first pick one where the panel is on */
492 	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
493 						  vlv_pipe_has_pp_on);
494 	/* didn't find one? pick one where vdd is on */
495 	if (intel_dp->pps_pipe == INVALID_PIPE)
496 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
497 							  vlv_pipe_has_vdd_on);
498 	/* didn't find one? pick one with just the correct port */
499 	if (intel_dp->pps_pipe == INVALID_PIPE)
500 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
501 							  vlv_pipe_any);
502 
503 	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
504 	if (intel_dp->pps_pipe == INVALID_PIPE) {
505 		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
506 			      port_name(port));
507 		return;
508 	}
509 
510 	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
511 		      port_name(port), pipe_name(intel_dp->pps_pipe));
512 
513 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
514 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
515 }
516 
517 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
518 {
519 	struct drm_device *dev = dev_priv->dev;
520 	struct intel_encoder *encoder;
521 
522 	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
523 		return;
524 
525 	/*
526 	 * We can't grab pps_mutex here due to deadlock with power_domain
527 	 * mutex when power_domain functions are called while holding pps_mutex.
528 	 * That also means that in order to use pps_pipe the code needs to
529 	 * hold both a power domain reference and pps_mutex, and the power domain
530 	 * reference get/put must be done while _not_ holding pps_mutex.
531 	 * pps_{lock,unlock}() do these steps in the correct order, so they
532 	 * should always be used.
533 	 */
534 
535 	for_each_intel_encoder(dev, encoder) {
536 		struct intel_dp *intel_dp;
537 
538 		if (encoder->type != INTEL_OUTPUT_EDP)
539 			continue;
540 
541 		intel_dp = enc_to_intel_dp(&encoder->base);
542 		intel_dp->pps_pipe = INVALID_PIPE;
543 	}
544 }
545 
546 static i915_reg_t
547 _pp_ctrl_reg(struct intel_dp *intel_dp)
548 {
549 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
550 
551 	if (IS_BROXTON(dev))
552 		return BXT_PP_CONTROL(0);
553 	else if (HAS_PCH_SPLIT(dev))
554 		return PCH_PP_CONTROL;
555 	else
556 		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
557 }
558 
559 static i915_reg_t
560 _pp_stat_reg(struct intel_dp *intel_dp)
561 {
562 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
563 
564 	if (IS_BROXTON(dev))
565 		return BXT_PP_STATUS(0);
566 	else if (HAS_PCH_SPLIT(dev))
567 		return PCH_PP_STATUS;
568 	else
569 		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
570 }
571 
572 /* Reboot notifier handler to shut down panel power to guarantee T12 timing.
573    This function is only applicable when the panel PM state is not tracked. */
574 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
575 			      void *unused)
576 {
577 	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
578 						 edp_notifier);
579 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
580 	struct drm_i915_private *dev_priv = dev->dev_private;
581 
582 #if 0
583 	if (!is_edp(intel_dp) || code != SYS_RESTART)
584 		return 0;
585 #endif
586 
587 	pps_lock(intel_dp);
588 
589 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
590 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
591 		i915_reg_t pp_ctrl_reg, pp_div_reg;
592 		u32 pp_div;
593 
594 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
595 		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
596 		pp_div = I915_READ(pp_div_reg);
597 		pp_div &= PP_REFERENCE_DIVIDER_MASK;
598 
599 		/* 0x1F write to PP_DIV_REG sets max cycle delay */
600 		I915_WRITE(pp_div_reg, pp_div | 0x1F);
601 		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
602 		msleep(intel_dp->panel_power_cycle_delay);
603 	}
604 
605 	pps_unlock(intel_dp);
606 
607 	return 0;
608 }
609 
610 static bool edp_have_panel_power(struct intel_dp *intel_dp)
611 {
612 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
613 	struct drm_i915_private *dev_priv = dev->dev_private;
614 
615 	lockdep_assert_held(&dev_priv->pps_mutex);
616 
617 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
618 	    intel_dp->pps_pipe == INVALID_PIPE)
619 		return false;
620 
621 	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
622 }
623 
624 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
625 {
626 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
627 	struct drm_i915_private *dev_priv = dev->dev_private;
628 
629 	lockdep_assert_held(&dev_priv->pps_mutex);
630 
631 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
632 	    intel_dp->pps_pipe == INVALID_PIPE)
633 		return false;
634 
635 	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
636 }
637 
638 static void
639 intel_dp_check_edp(struct intel_dp *intel_dp)
640 {
641 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
642 	struct drm_i915_private *dev_priv = dev->dev_private;
643 
644 	if (!is_edp(intel_dp))
645 		return;
646 
647 	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
648 		WARN(1, "eDP powered off while attempting aux channel communication.\n");
649 		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
650 			      I915_READ(_pp_stat_reg(intel_dp)),
651 			      I915_READ(_pp_ctrl_reg(intel_dp)));
652 	}
653 }
654 
655 static uint32_t
656 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
657 {
658 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
659 	struct drm_device *dev = intel_dig_port->base.base.dev;
660 	struct drm_i915_private *dev_priv = dev->dev_private;
661 	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
662 	uint32_t status;
663 	bool done;
664 
665 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
666 	if (has_aux_irq)
667 		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
668 					  msecs_to_jiffies_timeout(10));
669 	else
670 		done = wait_for(C, 10) == 0;
671 	if (!done)
672 		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
673 			  has_aux_irq);
674 #undef C
675 
676 	return status;
677 }
678 
679 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
680 {
681 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
682 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
683 
684 	if (index)
685 		return 0;
686 
687 	/*
688 	 * The clock divider is based on the hrawclk, and would like to run at
689 	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that.
690 	 */
691 	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
692 }
693 
694 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
695 {
696 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
697 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
698 
699 	if (index)
700 		return 0;
701 
702 	/*
703 	 * The clock divider is based on the cdclk or PCH rawclk, and would
704 	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
705 	 * divide by 2000 and use that.
706 	 */
707 	if (intel_dig_port->port == PORT_A)
708 		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
709 	else
710 		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
711 }
712 
713 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
714 {
715 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
716 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
717 
718 	if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
719 		/* Workaround for non-ULT HSW */
720 		switch (index) {
721 		case 0: return 63;
722 		case 1: return 72;
723 		default: return 0;
724 		}
725 	}
726 
727 	return ilk_get_aux_clock_divider(intel_dp, index);
728 }
729 
730 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
731 {
732 	/*
733 	 * SKL doesn't need us to program the AUX clock divider (Hardware will
734 	 * derive the clock from CDCLK automatically). We still implement the
735 	 * get_aux_clock_divider vfunc to plug into the existing code.
736 	 */
737 	return index ? 0 : 1;
738 }
739 
740 static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
741 				     bool has_aux_irq,
742 				     int send_bytes,
743 				     uint32_t aux_clock_divider)
744 {
745 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
746 	struct drm_device *dev = intel_dig_port->base.base.dev;
747 	uint32_t precharge, timeout;
748 
749 	if (IS_GEN6(dev))
750 		precharge = 3;
751 	else
752 		precharge = 5;
753 
754 	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
755 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
756 	else
757 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
758 
759 	return DP_AUX_CH_CTL_SEND_BUSY |
760 	       DP_AUX_CH_CTL_DONE |
761 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
762 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
763 	       timeout |
764 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
765 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
766 	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
767 	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
768 }
769 
770 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
771 				      bool has_aux_irq,
772 				      int send_bytes,
773 				      uint32_t unused)
774 {
775 	return DP_AUX_CH_CTL_SEND_BUSY |
776 	       DP_AUX_CH_CTL_DONE |
777 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
778 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
779 	       DP_AUX_CH_CTL_TIME_OUT_1600us |
780 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
781 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
782 	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
783 }
784 
785 static int
786 intel_dp_aux_ch(struct intel_dp *intel_dp,
787 		const uint8_t *send, int send_bytes,
788 		uint8_t *recv, int recv_size)
789 {
790 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
791 	struct drm_device *dev = intel_dig_port->base.base.dev;
792 	struct drm_i915_private *dev_priv = dev->dev_private;
793 	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
794 	uint32_t aux_clock_divider;
795 	int i, ret, recv_bytes;
796 	uint32_t status;
797 	int try, clock = 0;
798 #ifdef __DragonFly__
799 	bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
800 #else
801 	bool has_aux_irq = HAS_AUX_IRQ(dev);
802 #endif
803 	bool vdd;
804 
805 	pps_lock(intel_dp);
806 
807 	/*
808 	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
809 	 * In such cases we want to leave VDD enabled and it's up to the upper layers
810 	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
811 	 * ourselves.
812 	 */
813 	vdd = edp_panel_vdd_on(intel_dp);
814 
815 	/* dp aux is extremely sensitive to irq latency, hence request the
816 	 * lowest possible wakeup latency and so prevent the cpu from going into
817 	 * deep sleep states.
818 	 */
819 	pm_qos_update_request(&dev_priv->pm_qos, 0);
820 
821 	intel_dp_check_edp(intel_dp);
822 
823 	/* Try to wait for any previous AUX channel activity */
824 	for (try = 0; try < 3; try++) {
825 		status = I915_READ_NOTRACE(ch_ctl);
826 		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
827 			break;
828 		msleep(1);
829 	}
830 
831 	if (try == 3) {
832 		static u32 last_status = -1;
833 		const u32 status = I915_READ(ch_ctl);
834 
835 		if (status != last_status) {
836 			WARN(1, "dp_aux_ch not started status 0x%08x\n",
837 			     status);
838 			last_status = status;
839 		}
840 
841 		ret = -EBUSY;
842 		goto out;
843 	}
844 
845 	/* Only 5 data registers! */
846 	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
847 		ret = -E2BIG;
848 		goto out;
849 	}
850 
851 	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
852 		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
853 							  has_aux_irq,
854 							  send_bytes,
855 							  aux_clock_divider);
856 
857 		/* Must try at least 3 times according to DP spec */
858 		for (try = 0; try < 5; try++) {
859 			/* Load the send data into the aux channel data registers */
860 			for (i = 0; i < send_bytes; i += 4)
861 				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
862 					   intel_dp_pack_aux(send + i,
863 							     send_bytes - i));
864 
865 			/* Send the command and wait for it to complete */
866 			I915_WRITE(ch_ctl, send_ctl);
867 
868 			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
869 
870 			/* Clear done status and any errors */
871 			I915_WRITE(ch_ctl,
872 				   status |
873 				   DP_AUX_CH_CTL_DONE |
874 				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
875 				   DP_AUX_CH_CTL_RECEIVE_ERROR);
876 
877 			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
878 				continue;
879 
880 			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
881 			 *   400us delay required for errors and timeouts
882 			 *   Timeout errors from the HW already meet this
883 			 *   requirement so skip to next iteration
884 			 */
885 			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
886 				usleep_range(400, 500);
887 				continue;
888 			}
889 			if (status & DP_AUX_CH_CTL_DONE)
890 				goto done;
891 		}
892 	}
893 
894 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
895 		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
896 		ret = -EBUSY;
897 		goto out;
898 	}
899 
900 done:
901 	/* Check for timeout or receive error.
902 	 * Timeouts occur when the sink is not connected
903 	 */
904 	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
905 		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
906 		ret = -EIO;
907 		goto out;
908 	}
909 
910 	/* Timeouts occur when the device isn't connected, so they're
911 	 * "normal" -- don't fill the kernel log with these */
912 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
913 		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
914 		ret = -ETIMEDOUT;
915 		goto out;
916 	}
917 
918 	/* Unload any bytes sent back from the other side */
919 	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
920 		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
921 
922 	/*
923 	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
924 	 * We have no idea what happened, so we return -EBUSY so the
925 	 * drm layer takes care of the necessary retries.
926 	 */
927 	if (recv_bytes == 0 || recv_bytes > 20) {
928 		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
929 			      recv_bytes);
930 		/*
931 		 * FIXME: This patch was created on top of a series that
932 		 * organizes the retries at the drm level. There, EBUSY should
933 		 * also take care of the 1ms wait before retrying.
934 		 * That aux retry re-org is still needed, and once it is
935 		 * merged we can remove this sleep from here.
936 		 */
937 		usleep_range(1000, 1500);
938 		ret = -EBUSY;
939 		goto out;
940 	}
941 
942 	if (recv_bytes > recv_size)
943 		recv_bytes = recv_size;
944 
945 	for (i = 0; i < recv_bytes; i += 4)
946 		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
947 				    recv + i, recv_bytes - i);
948 
949 	ret = recv_bytes;
950 out:
951 	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
952 
953 	if (vdd)
954 		edp_panel_vdd_off(intel_dp, false);
955 
956 	pps_unlock(intel_dp);
957 
958 	return ret;
959 }
960 
961 #define BARE_ADDRESS_SIZE	3
962 #define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
963 static ssize_t
964 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
965 {
966 	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
967 	uint8_t txbuf[20], rxbuf[20];
968 	size_t txsize, rxsize;
969 	int ret;
970 
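	/*
	 * Build the 4-byte AUX request header: 4-bit request type plus the
	 * 20-bit address (MSB first), then the transfer length minus one.
	 */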
971 	txbuf[0] = (msg->request << 4) |
972 		((msg->address >> 16) & 0xf);
973 	txbuf[1] = (msg->address >> 8) & 0xff;
974 	txbuf[2] = msg->address & 0xff;
975 	txbuf[3] = msg->size - 1;
976 
977 	switch (msg->request & ~DP_AUX_I2C_MOT) {
978 	case DP_AUX_NATIVE_WRITE:
979 	case DP_AUX_I2C_WRITE:
980 	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
981 		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
982 		rxsize = 2; /* 0 or 1 data bytes */
983 
984 		if (WARN_ON(txsize > 20))
985 			return -E2BIG;
986 
987 		if (msg->buffer)
988 			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
989 		else
990 			WARN_ON(msg->size);
991 
992 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
993 		if (ret > 0) {
994 			msg->reply = rxbuf[0] >> 4;
995 
996 			if (ret > 1) {
997 				/* Number of bytes written in a short write. */
998 				ret = clamp_t(int, rxbuf[1], 0, msg->size);
999 			} else {
1000 				/* Return payload size. */
1001 				ret = msg->size;
1002 			}
1003 		}
1004 		break;
1005 
1006 	case DP_AUX_NATIVE_READ:
1007 	case DP_AUX_I2C_READ:
1008 		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1009 		rxsize = msg->size + 1;
1010 
1011 		if (WARN_ON(rxsize > 20))
1012 			return -E2BIG;
1013 
1014 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1015 		if (ret > 0) {
1016 			msg->reply = rxbuf[0] >> 4;
1017 			/*
1018 			 * Assume happy day, and copy the data. The caller is
1019 			 * expected to check msg->reply before touching it.
1020 			 *
1021 			 * Return payload size.
1022 			 */
1023 			ret--;
1024 			memcpy(msg->buffer, rxbuf + 1, ret);
1025 		}
1026 		break;
1027 
1028 	default:
1029 		ret = -EINVAL;
1030 		break;
1031 	}
1032 
1033 	return ret;
1034 }
1035 
1036 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1037 				       enum port port)
1038 {
1039 	switch (port) {
1040 	case PORT_B:
1041 	case PORT_C:
1042 	case PORT_D:
1043 		return DP_AUX_CH_CTL(port);
1044 	default:
1045 		MISSING_CASE(port);
1046 		return DP_AUX_CH_CTL(PORT_B);
1047 	}
1048 }
1049 
1050 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1051 					enum port port, int index)
1052 {
1053 	switch (port) {
1054 	case PORT_B:
1055 	case PORT_C:
1056 	case PORT_D:
1057 		return DP_AUX_CH_DATA(port, index);
1058 	default:
1059 		MISSING_CASE(port);
1060 		return DP_AUX_CH_DATA(PORT_B, index);
1061 	}
1062 }
1063 
1064 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1065 				       enum port port)
1066 {
1067 	switch (port) {
1068 	case PORT_A:
1069 		return DP_AUX_CH_CTL(port);
1070 	case PORT_B:
1071 	case PORT_C:
1072 	case PORT_D:
1073 		return PCH_DP_AUX_CH_CTL(port);
1074 	default:
1075 		MISSING_CASE(port);
1076 		return DP_AUX_CH_CTL(PORT_A);
1077 	}
1078 }
1079 
1080 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1081 					enum port port, int index)
1082 {
1083 	switch (port) {
1084 	case PORT_A:
1085 		return DP_AUX_CH_DATA(port, index);
1086 	case PORT_B:
1087 	case PORT_C:
1088 	case PORT_D:
1089 		return PCH_DP_AUX_CH_DATA(port, index);
1090 	default:
1091 		MISSING_CASE(port);
1092 		return DP_AUX_CH_DATA(PORT_A, index);
1093 	}
1094 }
1095 
1096 /*
1097  * On SKL we don't have Aux for port E so we rely
1098  * on VBT to set a proper alternate aux channel.
1099  */
1100 static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1101 {
1102 	const struct ddi_vbt_port_info *info =
1103 		&dev_priv->vbt.ddi_port_info[PORT_E];
1104 
1105 	switch (info->alternate_aux_channel) {
1106 	case DP_AUX_A:
1107 		return PORT_A;
1108 	case DP_AUX_B:
1109 		return PORT_B;
1110 	case DP_AUX_C:
1111 		return PORT_C;
1112 	case DP_AUX_D:
1113 		return PORT_D;
1114 	default:
1115 		MISSING_CASE(info->alternate_aux_channel);
1116 		return PORT_A;
1117 	}
1118 }
1119 
1120 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1121 				       enum port port)
1122 {
1123 	if (port == PORT_E)
1124 		port = skl_porte_aux_port(dev_priv);
1125 
1126 	switch (port) {
1127 	case PORT_A:
1128 	case PORT_B:
1129 	case PORT_C:
1130 	case PORT_D:
1131 		return DP_AUX_CH_CTL(port);
1132 	default:
1133 		MISSING_CASE(port);
1134 		return DP_AUX_CH_CTL(PORT_A);
1135 	}
1136 }
1137 
1138 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1139 					enum port port, int index)
1140 {
1141 	if (port == PORT_E)
1142 		port = skl_porte_aux_port(dev_priv);
1143 
1144 	switch (port) {
1145 	case PORT_A:
1146 	case PORT_B:
1147 	case PORT_C:
1148 	case PORT_D:
1149 		return DP_AUX_CH_DATA(port, index);
1150 	default:
1151 		MISSING_CASE(port);
1152 		return DP_AUX_CH_DATA(PORT_A, index);
1153 	}
1154 }
1155 
1156 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1157 					 enum port port)
1158 {
1159 	if (INTEL_INFO(dev_priv)->gen >= 9)
1160 		return skl_aux_ctl_reg(dev_priv, port);
1161 	else if (HAS_PCH_SPLIT(dev_priv))
1162 		return ilk_aux_ctl_reg(dev_priv, port);
1163 	else
1164 		return g4x_aux_ctl_reg(dev_priv, port);
1165 }
1166 
1167 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1168 					  enum port port, int index)
1169 {
1170 	if (INTEL_INFO(dev_priv)->gen >= 9)
1171 		return skl_aux_data_reg(dev_priv, port, index);
1172 	else if (HAS_PCH_SPLIT(dev_priv))
1173 		return ilk_aux_data_reg(dev_priv, port, index);
1174 	else
1175 		return g4x_aux_data_reg(dev_priv, port, index);
1176 }
1177 
1178 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1179 {
1180 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1181 	enum port port = dp_to_dig_port(intel_dp)->port;
1182 	int i;
1183 
1184 	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1185 	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1186 		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1187 }
1188 
1189 static void
1190 intel_dp_aux_fini(struct intel_dp *intel_dp)
1191 {
1192 	drm_dp_aux_unregister(&intel_dp->aux);
1193 	kfree(intel_dp->aux.name);
1194 }
1195 
1196 static int
1197 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1198 {
1199 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1200 	enum port port = intel_dig_port->port;
1201 	int ret;
1202 
1203 	intel_aux_reg_init(intel_dp);
1204 
1205 	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1206 	if (!intel_dp->aux.name)
1207 		return -ENOMEM;
1208 
1209 	intel_dp->aux.dev = connector->base.kdev;
1210 	intel_dp->aux.transfer = intel_dp_aux_transfer;
1211 
1212 #if 0
1213 	DRM_DEBUG_KMS("registering %s bus for %s\n",
1214 		      intel_dp->aux.name,
1215 		      connector->base.kdev->kobj.name);
1216 #endif
1217 
1218 	ret = drm_dp_aux_register(&intel_dp->aux);
1219 	if (ret < 0) {
1220 		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1221 			  intel_dp->aux.name, ret);
1222 		kfree(intel_dp->aux.name);
1223 		return ret;
1224 	}
1225 
1226 	return 0;
1227 }
1228 
1229 static void
1230 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1231 {
1232 	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1233 
1234 	intel_dp_aux_fini(intel_dp);
1235 	intel_connector_unregister(intel_connector);
1236 }
1237 
1238 static int
1239 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1240 {
1241 	if (intel_dp->num_sink_rates) {
1242 		*sink_rates = intel_dp->sink_rates;
1243 		return intel_dp->num_sink_rates;
1244 	}
1245 
1246 	*sink_rates = default_rates;
1247 
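	/*
	 * The DPCD link bw codes 0x06/0x0a/0x14 shifted right by 3 give
	 * 0/1/2 - the index of the max rate in default_rates[] - so +1
	 * turns that into the number of usable entries.
	 */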
1248 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1249 }
1250 
1251 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1252 {
1253 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1254 	struct drm_device *dev = dig_port->base.base.dev;
1255 
1256 	/* WaDisableHBR2:skl */
1257 	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1258 		return false;
1259 
1260 	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1261 	    (INTEL_INFO(dev)->gen >= 9))
1262 		return true;
1263 	else
1264 		return false;
1265 }
1266 
1267 static int
1268 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1269 {
1270 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1271 	struct drm_device *dev = dig_port->base.base.dev;
1272 	int size;
1273 
1274 	if (IS_BROXTON(dev)) {
1275 		*source_rates = bxt_rates;
1276 		size = ARRAY_SIZE(bxt_rates);
1277 	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1278 		*source_rates = skl_rates;
1279 		size = ARRAY_SIZE(skl_rates);
1280 	} else {
1281 		*source_rates = default_rates;
1282 		size = ARRAY_SIZE(default_rates);
1283 	}
1284 
1285 	/* This depends on the fact that 5.4 is the last value in the array */
1286 	if (!intel_dp_source_supports_hbr2(intel_dp))
1287 		size--;
1288 
1289 	return size;
1290 }
1291 
1292 static void
1293 intel_dp_set_clock(struct intel_encoder *encoder,
1294 		   struct intel_crtc_state *pipe_config)
1295 {
1296 	struct drm_device *dev = encoder->base.dev;
1297 	const struct dp_link_dpll *divisor = NULL;
1298 	int i, count = 0;
1299 
1300 	if (IS_G4X(dev)) {
1301 		divisor = gen4_dpll;
1302 		count = ARRAY_SIZE(gen4_dpll);
1303 	} else if (HAS_PCH_SPLIT(dev)) {
1304 		divisor = pch_dpll;
1305 		count = ARRAY_SIZE(pch_dpll);
1306 	} else if (IS_CHERRYVIEW(dev)) {
1307 		divisor = chv_dpll;
1308 		count = ARRAY_SIZE(chv_dpll);
1309 	} else if (IS_VALLEYVIEW(dev)) {
1310 		divisor = vlv_dpll;
1311 		count = ARRAY_SIZE(vlv_dpll);
1312 	}
1313 
1314 	if (divisor && count) {
1315 		for (i = 0; i < count; i++) {
1316 			if (pipe_config->port_clock == divisor[i].clock) {
1317 				pipe_config->dpll = divisor[i].dpll;
1318 				pipe_config->clock_set = true;
1319 				break;
1320 			}
1321 		}
1322 	}
1323 }
1324 
1325 static int intersect_rates(const int *source_rates, int source_len,
1326 			   const int *sink_rates, int sink_len,
1327 			   int *common_rates)
1328 {
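	/* merge-style intersection of two ascending, sorted rate arrays */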
1329 	int i = 0, j = 0, k = 0;
1330 
1331 	while (i < source_len && j < sink_len) {
1332 		if (source_rates[i] == sink_rates[j]) {
1333 			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1334 				return k;
1335 			common_rates[k] = source_rates[i];
1336 			++k;
1337 			++i;
1338 			++j;
1339 		} else if (source_rates[i] < sink_rates[j]) {
1340 			++i;
1341 		} else {
1342 			++j;
1343 		}
1344 	}
1345 	return k;
1346 }
1347 
1348 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1349 				 int *common_rates)
1350 {
1351 	const int *source_rates, *sink_rates;
1352 	int source_len, sink_len;
1353 
1354 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1355 	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1356 
1357 	return intersect_rates(source_rates, source_len,
1358 			       sink_rates, sink_len,
1359 			       common_rates);
1360 }
1361 
1362 static void snprintf_int_array(char *str, size_t len,
1363 			       const int *array, int nelem)
1364 {
1365 	int i;
1366 
1367 	str[0] = '\0';
1368 
1369 	for (i = 0; i < nelem; i++) {
1370 		int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1371 		if (r >= len)
1372 			return;
1373 		str += r;
1374 		len -= r;
1375 	}
1376 }
1377 
1378 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1379 {
1380 	const int *source_rates, *sink_rates;
1381 	int source_len, sink_len, common_len;
1382 	int common_rates[DP_MAX_SUPPORTED_RATES];
1383 	char str[128]; /* FIXME: too big for stack? */
1384 
1385 	if ((drm_debug & DRM_UT_KMS) == 0)
1386 		return;
1387 
1388 	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1389 	snprintf_int_array(str, sizeof(str), source_rates, source_len);
1390 	DRM_DEBUG_KMS("source rates: %s\n", str);
1391 
1392 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1393 	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1394 	DRM_DEBUG_KMS("sink rates: %s\n", str);
1395 
1396 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1397 	snprintf_int_array(str, sizeof(str), common_rates, common_len);
1398 	DRM_DEBUG_KMS("common rates: %s\n", str);
1399 }
1400 
1401 static int rate_to_index(int find, const int *rates)
1402 {
1403 	int i = 0;
1404 
1405 	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1406 		if (find == rates[i])
1407 			break;
1408 
1409 	return i;
1410 }
1411 
1412 int
1413 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1414 {
1415 	int rates[DP_MAX_SUPPORTED_RATES] = {};
1416 	int len;
1417 
1418 	len = intel_dp_common_rates(intel_dp, rates);
1419 	if (WARN_ON(len <= 0))
1420 		return 162000;
1421 
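	/* rates[] was zero-initialized and filled in ascending order, so
	 * rate_to_index(0, rates) is its length and the previous entry is
	 * the highest common rate. */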
1422 	return rates[rate_to_index(0, rates) - 1];
1423 }
1424 
1425 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1426 {
1427 	return rate_to_index(rate, intel_dp->sink_rates);
1428 }
1429 
1430 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1431 			   uint8_t *link_bw, uint8_t *rate_select)
1432 {
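	/* eDP 1.4 sinks with a rate table are programmed with an index
	 * (rate_select) instead of a link bw code. */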
1433 	if (intel_dp->num_sink_rates) {
1434 		*link_bw = 0;
1435 		*rate_select =
1436 			intel_dp_rate_select(intel_dp, port_clock);
1437 	} else {
1438 		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1439 		*rate_select = 0;
1440 	}
1441 }
1442 
1443 bool
1444 intel_dp_compute_config(struct intel_encoder *encoder,
1445 			struct intel_crtc_state *pipe_config)
1446 {
1447 	struct drm_device *dev = encoder->base.dev;
1448 	struct drm_i915_private *dev_priv = dev->dev_private;
1449 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1450 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1451 	enum port port = dp_to_dig_port(intel_dp)->port;
1452 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1453 	struct intel_connector *intel_connector = intel_dp->attached_connector;
1454 	int lane_count, clock;
1455 	int min_lane_count = 1;
1456 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1457 	/* Conveniently, the link BW constants become indices with a shift... */
1458 	int min_clock = 0;
1459 	int max_clock;
1460 	int bpp, mode_rate;
1461 	int link_avail, link_clock;
1462 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1463 	int common_len;
1464 	uint8_t link_bw, rate_select;
1465 
1466 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1467 
1468 	/* No common link rates between source and sink */
1469 	WARN_ON(common_len <= 0);
1470 
1471 	max_clock = common_len - 1;
1472 
1473 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1474 		pipe_config->has_pch_encoder = true;
1475 
1476 	pipe_config->has_dp_encoder = true;
1477 	pipe_config->has_drrs = false;
1478 	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1479 
1480 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1481 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1482 				       adjusted_mode);
1483 
1484 		if (INTEL_INFO(dev)->gen >= 9) {
1485 			int ret;
1486 			ret = skl_update_scaler_crtc(pipe_config);
1487 			if (ret)
1488 				return false;
1489 		}
1490 
1491 		if (HAS_GMCH_DISPLAY(dev))
1492 			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1493 						 intel_connector->panel.fitting_mode);
1494 		else
1495 			intel_pch_panel_fitting(intel_crtc, pipe_config,
1496 						intel_connector->panel.fitting_mode);
1497 	}
1498 
1499 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1500 		return false;
1501 
1502 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1503 		      "max bw %d pixel clock %iKHz\n",
1504 		      max_lane_count, common_rates[max_clock],
1505 		      adjusted_mode->crtc_clock);
1506 
1507 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1508 	 * bpc in between. */
1509 	bpp = pipe_config->pipe_bpp;
1510 	if (is_edp(intel_dp)) {
1511 
1512 		/* Get bpp from VBT only for panels that don't have bpp in EDID */
1513 		if (intel_connector->base.display_info.bpc == 0 &&
1514 			(dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
1515 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1516 				      dev_priv->vbt.edp.bpp);
1517 			bpp = dev_priv->vbt.edp.bpp;
1518 		}
1519 
1520 		/*
1521 		 * Use the maximum clock and number of lanes the eDP panel
1522 		 * advertises being capable of. The panels are generally
1523 		 * designed to support only a single clock and lane
1524 		 * configuration, and typically these values correspond to the
1525 		 * native resolution of the panel.
1526 		 */
1527 		min_lane_count = max_lane_count;
1528 		min_clock = max_clock;
1529 	}
1530 
1531 	for (; bpp >= 6*3; bpp -= 2*3) {
1532 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1533 						   bpp);
1534 
1535 		for (clock = min_clock; clock <= max_clock; clock++) {
1536 			for (lane_count = min_lane_count;
1537 				lane_count <= max_lane_count;
1538 				lane_count <<= 1) {
1539 
1540 				link_clock = common_rates[clock];
1541 				link_avail = intel_dp_max_data_rate(link_clock,
1542 								    lane_count);
1543 
1544 				if (mode_rate <= link_avail) {
1545 					goto found;
1546 				}
1547 			}
1548 		}
1549 	}
1550 
1551 	return false;
1552 
1553 found:
1554 	if (intel_dp->color_range_auto) {
1555 		/*
1556 		 * See:
1557 		 * CEA-861-E - 5.1 Default Encoding Parameters
1558 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1559 		 */
1560 		pipe_config->limited_color_range =
1561 			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1562 	} else {
1563 		pipe_config->limited_color_range =
1564 			intel_dp->limited_color_range;
1565 	}
1566 
1567 	pipe_config->lane_count = lane_count;
1568 
1569 	pipe_config->pipe_bpp = bpp;
1570 	pipe_config->port_clock = common_rates[clock];
1571 
1572 	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1573 			      &link_bw, &rate_select);
1574 
1575 	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1576 		      link_bw, rate_select, pipe_config->lane_count,
1577 		      pipe_config->port_clock, bpp);
1578 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1579 		      mode_rate, link_avail);
1580 
1581 	intel_link_compute_m_n(bpp, lane_count,
1582 			       adjusted_mode->crtc_clock,
1583 			       pipe_config->port_clock,
1584 			       &pipe_config->dp_m_n);
1585 
1586 	if (intel_connector->panel.downclock_mode != NULL &&
1587 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1588 			pipe_config->has_drrs = true;
1589 			intel_link_compute_m_n(bpp, lane_count,
1590 				intel_connector->panel.downclock_mode->clock,
1591 				pipe_config->port_clock,
1592 				&pipe_config->dp_m2_n2);
1593 	}
1594 
1595 	if (!HAS_DDI(dev))
1596 		intel_dp_set_clock(encoder, pipe_config);
1597 
1598 	return true;
1599 }
1600 
1601 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1602 			      const struct intel_crtc_state *pipe_config)
1603 {
1604 	intel_dp->link_rate = pipe_config->port_clock;
1605 	intel_dp->lane_count = pipe_config->lane_count;
1606 }
1607 
1608 static void intel_dp_prepare(struct intel_encoder *encoder)
1609 {
1610 	struct drm_device *dev = encoder->base.dev;
1611 	struct drm_i915_private *dev_priv = dev->dev_private;
1612 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1613 	enum port port = dp_to_dig_port(intel_dp)->port;
1614 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1615 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1616 
1617 	intel_dp_set_link_params(intel_dp, crtc->config);
1618 
1619 	/*
1620 	 * There are four kinds of DP registers:
1621 	 *
1622 	 * 	IBX PCH
1623 	 * 	SNB CPU
1624 	 *	IVB CPU
1625 	 * 	CPT PCH
1626 	 *
1627 	 * IBX PCH and CPU are the same for almost everything,
1628 	 * except that the CPU DP PLL is configured in this
1629 	 * register
1630 	 *
1631 	 * CPT PCH is quite different, having many bits moved
1632 	 * to the TRANS_DP_CTL register instead. That
1633 	 * configuration happens (oddly) in ironlake_pch_enable
1634 	 */
1635 
1636 	/* Preserve the BIOS-computed detected bit. This is
1637 	 * supposed to be read-only.
1638 	 */
1639 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1640 
1641 	/* Handle DP bits in common between all three register formats */
1642 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1643 	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1644 
1645 	/* Split out the IBX/CPU vs CPT settings */
1646 
1647 	if (IS_GEN7(dev) && port == PORT_A) {
1648 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1649 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1650 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1651 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1652 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1653 
1654 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1655 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1656 
1657 		intel_dp->DP |= crtc->pipe << 29;
1658 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1659 		u32 trans_dp;
1660 
1661 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1662 
1663 		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1664 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1665 			trans_dp |= TRANS_DP_ENH_FRAMING;
1666 		else
1667 			trans_dp &= ~TRANS_DP_ENH_FRAMING;
1668 		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1669 	} else {
1670 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1671 		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
1672 			intel_dp->DP |= DP_COLOR_RANGE_16_235;
1673 
1674 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1675 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1676 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1677 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1678 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
1679 
1680 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1681 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1682 
1683 		if (IS_CHERRYVIEW(dev))
1684 			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1685 		else if (crtc->pipe == PIPE_B)
1686 			intel_dp->DP |= DP_PIPEB_SELECT;
1687 	}
1688 }
1689 
1690 #define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1691 #define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1692 
1693 #define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1694 #define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1695 
1696 #define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1697 #define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1698 
1699 static void wait_panel_status(struct intel_dp *intel_dp,
1700 				       u32 mask,
1701 				       u32 value)
1702 {
1703 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1704 	struct drm_i915_private *dev_priv = dev->dev_private;
1705 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1706 
1707 	lockdep_assert_held(&dev_priv->pps_mutex);
1708 
1709 	pp_stat_reg = _pp_stat_reg(intel_dp);
1710 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1711 
1712 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1713 			mask, value,
1714 			I915_READ(pp_stat_reg),
1715 			I915_READ(pp_ctrl_reg));
1716 
1717 	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
1718 		      5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
1719 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1720 				I915_READ(pp_stat_reg),
1721 				I915_READ(pp_ctrl_reg));
1722 
1723 	DRM_DEBUG_KMS("Wait complete\n");
1724 }
1725 
1726 static void wait_panel_on(struct intel_dp *intel_dp)
1727 {
1728 	DRM_DEBUG_KMS("Wait for panel power on\n");
1729 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1730 }
1731 
1732 static void wait_panel_off(struct intel_dp *intel_dp)
1733 {
1734 	DRM_DEBUG_KMS("Wait for panel power off time\n");
1735 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1736 }
1737 
1738 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1739 {
1740 	ktime_t panel_power_on_time;
1741 	s64 panel_power_off_duration;
1742 
1743 	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1744 
1745 	/* take the difference of the current time and the panel power off time
1746 	 * and then make the panel wait for t11_t12 if needed. */
1747 	panel_power_on_time = ktime_get_boottime();
1748 	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
1749 
1750 	/* When we disable the VDD override bit last we have to do the manual
1751 	 * wait. */
1752 	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
1753 		wait_remaining_ms_from_jiffies(jiffies,
1754 				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);
1755 
1756 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1757 }
1758 
1759 static void wait_backlight_on(struct intel_dp *intel_dp)
1760 {
1761 	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1762 				       intel_dp->backlight_on_delay);
1763 }
1764 
1765 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1766 {
1767 	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1768 				       intel_dp->backlight_off_delay);
1769 }
1770 
1771 /* Read the current pp_control value, unlocking the register if it
1772  * is locked
1773  */
1774 
1775 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1776 {
1777 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1778 	struct drm_i915_private *dev_priv = dev->dev_private;
1779 	u32 control;
1780 
1781 	lockdep_assert_held(&dev_priv->pps_mutex);
1782 
1783 	control = I915_READ(_pp_ctrl_reg(intel_dp));
1784 	if (!IS_BROXTON(dev)) {
1785 		control &= ~PANEL_UNLOCK_MASK;
1786 		control |= PANEL_UNLOCK_REGS;
1787 	}
1788 	return control;
1789 }
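
/*
 * Callers do a read-modify-write through this helper so the unlock key
 * is always present in what they write back (a sketch of the pattern):
 *
 *	pp = ironlake_get_pp_control(intel_dp);	// key bits now set
 *	pp |= EDP_FORCE_VDD;
 *	I915_WRITE(_pp_ctrl_reg(intel_dp), pp);
 *	POSTING_READ(_pp_ctrl_reg(intel_dp));
 *
 * Broxton apparently has no write-protect key in this register, hence
 * the IS_BROXTON() special case above.
 */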
1790 
1791 /*
1792  * Must be paired with edp_panel_vdd_off().
1793  * Must hold pps_mutex around the whole on/off sequence.
1794  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1795  */
1796 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1797 {
1798 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1799 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1800 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1801 	struct drm_i915_private *dev_priv = dev->dev_private;
1802 	enum intel_display_power_domain power_domain;
1803 	u32 pp;
1804 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1805 	bool need_to_disable = !intel_dp->want_panel_vdd;
1806 
1807 	lockdep_assert_held(&dev_priv->pps_mutex);
1808 
1809 	if (!is_edp(intel_dp))
1810 		return false;
1811 
1812 	cancel_delayed_work(&intel_dp->panel_vdd_work);
1813 	intel_dp->want_panel_vdd = true;
1814 
1815 	if (edp_have_panel_vdd(intel_dp))
1816 		return need_to_disable;
1817 
1818 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1819 	intel_display_power_get(dev_priv, power_domain);
1820 
1821 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1822 		      port_name(intel_dig_port->port));
1823 
1824 	if (!edp_have_panel_power(intel_dp))
1825 		wait_panel_power_cycle(intel_dp);
1826 
1827 	pp = ironlake_get_pp_control(intel_dp);
1828 	pp |= EDP_FORCE_VDD;
1829 
1830 	pp_stat_reg = _pp_stat_reg(intel_dp);
1831 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1832 
1833 	I915_WRITE(pp_ctrl_reg, pp);
1834 	POSTING_READ(pp_ctrl_reg);
1835 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1836 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1837 	/*
1838 	 * If the panel wasn't on, delay before accessing aux channel
1839 	 */
1840 	if (!edp_have_panel_power(intel_dp)) {
1841 		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1842 			      port_name(intel_dig_port->port));
1843 		msleep(intel_dp->panel_power_up_delay);
1844 	}
1845 
1846 	return need_to_disable;
1847 }
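
/*
 * Typical low-level usage under pps_mutex, mirroring what the callers in
 * this file do (a sketch, not a verbatim call site):
 *
 *	pps_lock(intel_dp);
 *	vdd = edp_panel_vdd_on(intel_dp);
 *	... talk to the panel over the AUX channel ...
 *	edp_panel_vdd_off(intel_dp, false);	// schedule delayed VDD off
 *	pps_unlock(intel_dp);
 *
 * The return value is true only for the call that actually requested VDD
 * (want_panel_vdd was still false), i.e. the caller that owes the off.
 */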
1848 
1849 /*
1850  * Must be paired with intel_edp_panel_vdd_off() or
1851  * intel_edp_panel_off().
1852  * Nested calls to these functions are not allowed since
1853  * we drop the lock. Caller must use some higher level
1854  * locking to prevent nested calls from other threads.
1855  */
1856 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1857 {
1858 	bool vdd;
1859 
1860 	if (!is_edp(intel_dp))
1861 		return;
1862 
1863 	pps_lock(intel_dp);
1864 	vdd = edp_panel_vdd_on(intel_dp);
1865 	pps_unlock(intel_dp);
1866 
1867 #ifdef __DragonFly__
1868 	/* XXX: limit dmesg spam to 16 warnings instead of 137, where is the bug? */
1869 	if (!vdd)
1870 		DRM_ERROR_RATELIMITED("eDP port %c VDD already requested on\n",
1871 		    port_name(dp_to_dig_port(intel_dp)->port));
1872 #else
1873 	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1874 	     port_name(dp_to_dig_port(intel_dp)->port));
1875 #endif
1876 }
1877 
1878 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1879 {
1880 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1881 	struct drm_i915_private *dev_priv = dev->dev_private;
1882 	struct intel_digital_port *intel_dig_port =
1883 		dp_to_dig_port(intel_dp);
1884 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1885 	enum intel_display_power_domain power_domain;
1886 	u32 pp;
1887 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1888 
1889 	lockdep_assert_held(&dev_priv->pps_mutex);
1890 
1891 	WARN_ON(intel_dp->want_panel_vdd);
1892 
1893 	if (!edp_have_panel_vdd(intel_dp))
1894 		return;
1895 
1896 	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1897 		      port_name(intel_dig_port->port));
1898 
1899 	pp = ironlake_get_pp_control(intel_dp);
1900 	pp &= ~EDP_FORCE_VDD;
1901 
1902 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1903 	pp_stat_reg = _pp_stat_reg(intel_dp);
1904 
1905 	I915_WRITE(pp_ctrl_reg, pp);
1906 	POSTING_READ(pp_ctrl_reg);
1907 
1908 	/* Make sure sequencer is idle before allowing subsequent activity */
1909 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1910 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1911 
1912 	if ((pp & POWER_TARGET_ON) == 0)
1913 		intel_dp->panel_power_off_time = ktime_get_boottime();
1914 
1915 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1916 	intel_display_power_put(dev_priv, power_domain);
1917 }
1918 
1919 static void edp_panel_vdd_work(struct work_struct *__work)
1920 {
1921 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1922 						 struct intel_dp, panel_vdd_work);
1923 
1924 	pps_lock(intel_dp);
1925 	if (!intel_dp->want_panel_vdd)
1926 		edp_panel_vdd_off_sync(intel_dp);
1927 	pps_unlock(intel_dp);
1928 }
1929 
1930 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1931 {
1932 	unsigned long delay;
1933 
1934 	/*
1935 	 * Queue the timer to fire a long time from now (relative to the power
1936 	 * down delay) to keep the panel power up across a sequence of
1937 	 * operations.
1938 	 */
1939 	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1940 	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1941 }
1942 
1943 /*
1944  * Must be paired with edp_panel_vdd_on().
1945  * Must hold pps_mutex around the whole on/off sequence.
1946  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1947  */
1948 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1949 {
1950 	struct drm_i915_private *dev_priv =
1951 		intel_dp_to_dev(intel_dp)->dev_private;
1952 
1953 	lockdep_assert_held(&dev_priv->pps_mutex);
1954 
1955 	if (!is_edp(intel_dp))
1956 		return;
1957 
1958 	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1959 	     port_name(dp_to_dig_port(intel_dp)->port));
1960 
1961 	intel_dp->want_panel_vdd = false;
1962 
1963 	if (sync)
1964 		edp_panel_vdd_off_sync(intel_dp);
1965 	else
1966 		edp_panel_vdd_schedule_off(intel_dp);
1967 }
1968 
1969 static void edp_panel_on(struct intel_dp *intel_dp)
1970 {
1971 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1972 	struct drm_i915_private *dev_priv = dev->dev_private;
1973 	u32 pp;
1974 	i915_reg_t pp_ctrl_reg;
1975 
1976 	lockdep_assert_held(&dev_priv->pps_mutex);
1977 
1978 	if (!is_edp(intel_dp))
1979 		return;
1980 
1981 	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1982 		      port_name(dp_to_dig_port(intel_dp)->port));
1983 
1984 	if (WARN(edp_have_panel_power(intel_dp),
1985 		 "eDP port %c panel power already on\n",
1986 		 port_name(dp_to_dig_port(intel_dp)->port)))
1987 		return;
1988 
1989 	wait_panel_power_cycle(intel_dp);
1990 
1991 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1992 	pp = ironlake_get_pp_control(intel_dp);
1993 	if (IS_GEN5(dev)) {
1994 		/* ILK workaround: disable reset around power sequence */
1995 		pp &= ~PANEL_POWER_RESET;
1996 		I915_WRITE(pp_ctrl_reg, pp);
1997 		POSTING_READ(pp_ctrl_reg);
1998 	}
1999 
2000 	pp |= POWER_TARGET_ON;
2001 	if (!IS_GEN5(dev))
2002 		pp |= PANEL_POWER_RESET;
2003 
2004 	I915_WRITE(pp_ctrl_reg, pp);
2005 	POSTING_READ(pp_ctrl_reg);
2006 
2007 	wait_panel_on(intel_dp);
2008 	intel_dp->last_power_on = jiffies;
2009 
2010 	if (IS_GEN5(dev)) {
2011 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2012 		I915_WRITE(pp_ctrl_reg, pp);
2013 		POSTING_READ(pp_ctrl_reg);
2014 	}
2015 }
2016 
2017 void intel_edp_panel_on(struct intel_dp *intel_dp)
2018 {
2019 	if (!is_edp(intel_dp))
2020 		return;
2021 
2022 	pps_lock(intel_dp);
2023 	edp_panel_on(intel_dp);
2024 	pps_unlock(intel_dp);
2025 }
2026 
2027 
2028 static void edp_panel_off(struct intel_dp *intel_dp)
2029 {
2030 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2031 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
2032 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2033 	struct drm_i915_private *dev_priv = dev->dev_private;
2034 	enum intel_display_power_domain power_domain;
2035 	u32 pp;
2036 	i915_reg_t pp_ctrl_reg;
2037 
2038 	lockdep_assert_held(&dev_priv->pps_mutex);
2039 
2040 	if (!is_edp(intel_dp))
2041 		return;
2042 
2043 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2044 		      port_name(dp_to_dig_port(intel_dp)->port));
2045 
2046 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2047 	     port_name(dp_to_dig_port(intel_dp)->port));
2048 
2049 	pp = ironlake_get_pp_control(intel_dp);
2050 	/* We need to switch off panel power _and_ force vdd, for otherwise some
2051 	 * panels get very unhappy and cease to work. */
2052 	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2053 		EDP_BLC_ENABLE);
2054 
2055 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2056 
2057 	intel_dp->want_panel_vdd = false;
2058 
2059 	I915_WRITE(pp_ctrl_reg, pp);
2060 	POSTING_READ(pp_ctrl_reg);
2061 
2062 	intel_dp->panel_power_off_time = ktime_get_boottime();
2063 	wait_panel_off(intel_dp);
2064 
2065 	/* We got a reference when we enabled the VDD. */
2066 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
2067 	intel_display_power_put(dev_priv, power_domain);
2068 }
2069 
2070 void intel_edp_panel_off(struct intel_dp *intel_dp)
2071 {
2072 	if (!is_edp(intel_dp))
2073 		return;
2074 
2075 	pps_lock(intel_dp);
2076 	edp_panel_off(intel_dp);
2077 	pps_unlock(intel_dp);
2078 }
2079 
2080 /* Enable backlight in the panel power control. */
2081 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2082 {
2083 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2084 	struct drm_device *dev = intel_dig_port->base.base.dev;
2085 	struct drm_i915_private *dev_priv = dev->dev_private;
2086 	u32 pp;
2087 	i915_reg_t pp_ctrl_reg;
2088 
2089 	/*
2090 	 * If we enable the backlight right away following a panel power
2091 	 * on, we may see slight flicker as the panel syncs with the eDP
2092 	 * link.  So delay a bit to make sure the image is solid before
2093 	 * allowing it to appear.
2094 	 */
2095 	wait_backlight_on(intel_dp);
2096 
2097 	pps_lock(intel_dp);
2098 
2099 	pp = ironlake_get_pp_control(intel_dp);
2100 	pp |= EDP_BLC_ENABLE;
2101 
2102 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2103 
2104 	I915_WRITE(pp_ctrl_reg, pp);
2105 	POSTING_READ(pp_ctrl_reg);
2106 
2107 	pps_unlock(intel_dp);
2108 }
2109 
2110 /* Enable backlight PWM and backlight PP control. */
2111 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2112 {
2113 	if (!is_edp(intel_dp))
2114 		return;
2115 
2116 	DRM_DEBUG_KMS("\n");
2117 
2118 	intel_panel_enable_backlight(intel_dp->attached_connector);
2119 	_intel_edp_backlight_on(intel_dp);
2120 }
2121 
2122 /* Disable backlight in the panel power control. */
2123 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2124 {
2125 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2126 	struct drm_i915_private *dev_priv = dev->dev_private;
2127 	u32 pp;
2128 	i915_reg_t pp_ctrl_reg;
2129 
2130 	if (!is_edp(intel_dp))
2131 		return;
2132 
2133 	pps_lock(intel_dp);
2134 
2135 	pp = ironlake_get_pp_control(intel_dp);
2136 	pp &= ~EDP_BLC_ENABLE;
2137 
2138 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2139 
2140 	I915_WRITE(pp_ctrl_reg, pp);
2141 	POSTING_READ(pp_ctrl_reg);
2142 
2143 	pps_unlock(intel_dp);
2144 
2145 	intel_dp->last_backlight_off = jiffies;
2146 	edp_wait_backlight_off(intel_dp);
2147 }
2148 
2149 /* Disable backlight PP control and backlight PWM. */
2150 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2151 {
2152 	if (!is_edp(intel_dp))
2153 		return;
2154 
2155 	DRM_DEBUG_KMS("\n");
2156 
2157 	_intel_edp_backlight_off(intel_dp);
2158 	intel_panel_disable_backlight(intel_dp->attached_connector);
2159 }
2160 
2161 /*
2162  * Hook for controlling the panel power control backlight through the bl_power
2163  * sysfs attribute. Take care to handle multiple calls.
2164  */
2165 static void intel_edp_backlight_power(struct intel_connector *connector,
2166 				      bool enable)
2167 {
2168 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2169 	bool is_enabled;
2170 
2171 	pps_lock(intel_dp);
2172 	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2173 	pps_unlock(intel_dp);
2174 
2175 	if (is_enabled == enable)
2176 		return;
2177 
2178 	DRM_DEBUG_KMS("panel power control backlight %s\n",
2179 		      enable ? "enable" : "disable");
2180 
2181 	if (enable)
2182 		_intel_edp_backlight_on(intel_dp);
2183 	else
2184 		_intel_edp_backlight_off(intel_dp);
2185 }
2186 
2187 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2188 {
2189 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2190 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2191 	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2192 
2193 	I915_STATE_WARN(cur_state != state,
2194 			"DP port %c state assertion failure (expected %s, current %s)\n",
2195 			port_name(dig_port->port),
2196 			onoff(state), onoff(cur_state));
2197 }
2198 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2199 
2200 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2201 {
2202 	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2203 
2204 	I915_STATE_WARN(cur_state != state,
2205 			"eDP PLL state assertion failure (expected %s, current %s)\n",
2206 			onoff(state), onoff(cur_state));
2207 }
2208 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2209 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2210 
2211 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2212 {
2213 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2214 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2215 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2216 
2217 	assert_pipe_disabled(dev_priv, crtc->pipe);
2218 	assert_dp_port_disabled(intel_dp);
2219 	assert_edp_pll_disabled(dev_priv);
2220 
2221 	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2222 		      crtc->config->port_clock);
2223 
2224 	intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2225 
2226 	if (crtc->config->port_clock == 162000)
2227 		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2228 	else
2229 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2230 
2231 	I915_WRITE(DP_A, intel_dp->DP);
2232 	POSTING_READ(DP_A);
2233 	udelay(500);
2234 
2235 	/*
2236 	 * [DevILK] Workaround required when enabling DP PLL
2237 	 * while a pipe is enabled going to FDI:
2238 	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2239 	 * 2. Program DP PLL enable
2240 	 */
2241 	if (IS_GEN5(dev_priv))
2242 		intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
2243 
2244 	intel_dp->DP |= DP_PLL_ENABLE;
2245 
2246 	I915_WRITE(DP_A, intel_dp->DP);
2247 	POSTING_READ(DP_A);
2248 	udelay(200);
2249 }
2250 
2251 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2252 {
2253 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2254 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2255 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2256 
2257 	assert_pipe_disabled(dev_priv, crtc->pipe);
2258 	assert_dp_port_disabled(intel_dp);
2259 	assert_edp_pll_enabled(dev_priv);
2260 
2261 	DRM_DEBUG_KMS("disabling eDP PLL\n");
2262 
2263 	intel_dp->DP &= ~DP_PLL_ENABLE;
2264 
2265 	I915_WRITE(DP_A, intel_dp->DP);
2266 	POSTING_READ(DP_A);
2267 	udelay(200);
2268 }
2269 
2270 /* If the sink supports it, try to set the power state appropriately */
2271 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2272 {
2273 	int ret, i;
2274 
2275 	/* Should have a valid DPCD by this point */
2276 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2277 		return;
2278 
2279 	if (mode != DRM_MODE_DPMS_ON) {
2280 		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2281 					 DP_SET_POWER_D3);
2282 	} else {
2283 		/*
2284 		 * When turning on, retry a few times, sleeping 1 ms between
2285 		 * attempts, to give the sink time to wake up.
2286 		 */
2287 		for (i = 0; i < 3; i++) {
2288 			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2289 						 DP_SET_POWER_D0);
2290 			if (ret == 1)
2291 				break;
2292 			msleep(1);
2293 		}
2294 	}
2295 
2296 	if (ret != 1)
2297 		DRM_DEBUG_KMS("failed to %s sink power state\n",
2298 			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2299 }
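
/*
 * drm_dp_dpcd_writeb() returns the number of bytes written, so "== 1"
 * means success.  The D0 wake-up loop above is the usual retry shape;
 * as a standalone sketch (dpcd_write_retry is hypothetical, not a real
 * helper in this file):
 *
 *	static int dpcd_write_retry(struct drm_dp_aux *aux,
 *				    unsigned int reg, u8 val, int tries)
 *	{
 *		while (tries--) {
 *			if (drm_dp_dpcd_writeb(aux, reg, val) == 1)
 *				return 0;
 *			msleep(1);
 *		}
 *		return -EIO;
 *	}
 */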
2300 
2301 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2302 				  enum i915_pipe *pipe)
2303 {
2304 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2305 	enum port port = dp_to_dig_port(intel_dp)->port;
2306 	struct drm_device *dev = encoder->base.dev;
2307 	struct drm_i915_private *dev_priv = dev->dev_private;
2308 	enum intel_display_power_domain power_domain;
2309 	u32 tmp;
2310 	bool ret;
2311 
2312 	power_domain = intel_display_port_power_domain(encoder);
2313 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2314 		return false;
2315 
2316 	ret = false;
2317 
2318 	tmp = I915_READ(intel_dp->output_reg);
2319 
2320 	if (!(tmp & DP_PORT_EN))
2321 		goto out;
2322 
2323 	if (IS_GEN7(dev) && port == PORT_A) {
2324 		*pipe = PORT_TO_PIPE_CPT(tmp);
2325 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2326 		enum i915_pipe p;
2327 
2328 		for_each_pipe(dev_priv, p) {
2329 			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2330 			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2331 				*pipe = p;
2332 				ret = true;
2333 
2334 				goto out;
2335 			}
2336 		}
2337 
2338 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2339 			      i915_mmio_reg_offset(intel_dp->output_reg));
2340 	} else if (IS_CHERRYVIEW(dev)) {
2341 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2342 	} else {
2343 		*pipe = PORT_TO_PIPE(tmp);
2344 	}
2345 
2346 	ret = true;
2347 
2348 out:
2349 	intel_display_power_put(dev_priv, power_domain);
2350 
2351 	return ret;
2352 }
2353 
2354 static void intel_dp_get_config(struct intel_encoder *encoder,
2355 				struct intel_crtc_state *pipe_config)
2356 {
2357 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2358 	u32 tmp, flags = 0;
2359 	struct drm_device *dev = encoder->base.dev;
2360 	struct drm_i915_private *dev_priv = dev->dev_private;
2361 	enum port port = dp_to_dig_port(intel_dp)->port;
2362 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2363 
2364 	tmp = I915_READ(intel_dp->output_reg);
2365 
2366 	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2367 
2368 	if (HAS_PCH_CPT(dev) && port != PORT_A) {
2369 		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2370 
2371 		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2372 			flags |= DRM_MODE_FLAG_PHSYNC;
2373 		else
2374 			flags |= DRM_MODE_FLAG_NHSYNC;
2375 
2376 		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2377 			flags |= DRM_MODE_FLAG_PVSYNC;
2378 		else
2379 			flags |= DRM_MODE_FLAG_NVSYNC;
2380 	} else {
2381 		if (tmp & DP_SYNC_HS_HIGH)
2382 			flags |= DRM_MODE_FLAG_PHSYNC;
2383 		else
2384 			flags |= DRM_MODE_FLAG_NHSYNC;
2385 
2386 		if (tmp & DP_SYNC_VS_HIGH)
2387 			flags |= DRM_MODE_FLAG_PVSYNC;
2388 		else
2389 			flags |= DRM_MODE_FLAG_NVSYNC;
2390 	}
2391 
2392 	pipe_config->base.adjusted_mode.flags |= flags;
2393 
2394 	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2395 	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
2396 		pipe_config->limited_color_range = true;
2397 
2398 	pipe_config->has_dp_encoder = true;
2399 
2400 	pipe_config->lane_count =
2401 		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2402 
2403 	intel_dp_get_m_n(crtc, pipe_config);
2404 
2405 	if (port == PORT_A) {
2406 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2407 			pipe_config->port_clock = 162000;
2408 		else
2409 			pipe_config->port_clock = 270000;
2410 	}
2411 
2412 	pipe_config->base.adjusted_mode.crtc_clock =
2413 		intel_dotclock_calculate(pipe_config->port_clock,
2414 					 &pipe_config->dp_m_n);
2415 
2416 	if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
2417 	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
2418 		/*
2419 		 * This is a big fat ugly hack.
2420 		 *
2421 		 * Some machines in UEFI boot mode provide us a VBT that has 18
2422 		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2423 		 * unknown we fail to light up. Yet the same BIOS boots up with
2424 		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2425 		 * max, not what it tells us to use.
2426 		 *
2427 		 * Note: This will still be broken if the eDP panel is not lit
2428 		 * up by the BIOS, and thus we can't get the mode at module
2429 		 * load.
2430 		 */
2431 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2432 			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
2433 		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
2434 	}
2435 }
2436 
2437 static void intel_disable_dp(struct intel_encoder *encoder)
2438 {
2439 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2440 	struct drm_device *dev = encoder->base.dev;
2441 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2442 
2443 	if (crtc->config->has_audio)
2444 		intel_audio_codec_disable(encoder);
2445 
2446 	if (HAS_PSR(dev) && !HAS_DDI(dev))
2447 		intel_psr_disable(intel_dp);
2448 
2449 	/* Make sure the panel is off before trying to change the mode. But also
2450 	 * ensure that we have vdd while we switch off the panel. */
2451 	intel_edp_panel_vdd_on(intel_dp);
2452 	intel_edp_backlight_off(intel_dp);
2453 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2454 	intel_edp_panel_off(intel_dp);
2455 
2456 	/* disable the port before the pipe on g4x */
2457 	if (INTEL_INFO(dev)->gen < 5)
2458 		intel_dp_link_down(intel_dp);
2459 }
2460 
2461 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2462 {
2463 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2464 	enum port port = dp_to_dig_port(intel_dp)->port;
2465 
2466 	intel_dp_link_down(intel_dp);
2467 
2468 	/* Only ilk+ has port A */
2469 	if (port == PORT_A)
2470 		ironlake_edp_pll_off(intel_dp);
2471 }
2472 
2473 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2474 {
2475 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2476 
2477 	intel_dp_link_down(intel_dp);
2478 }
2479 
2480 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2481 				     bool reset)
2482 {
2483 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2484 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2485 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2486 	enum i915_pipe pipe = crtc->pipe;
2487 	uint32_t val;
2488 
2489 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2490 	if (reset)
2491 		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2492 	else
2493 		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2494 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2495 
2496 	if (crtc->config->lane_count > 2) {
2497 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2498 		if (reset)
2499 			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2500 		else
2501 			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2502 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2503 	}
2504 
2505 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2506 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2507 	if (reset)
2508 		val &= ~DPIO_PCS_CLK_SOFT_RESET;
2509 	else
2510 		val |= DPIO_PCS_CLK_SOFT_RESET;
2511 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2512 
2513 	if (crtc->config->lane_count > 2) {
2514 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2515 		val |= CHV_PCS_REQ_SOFTRESET_EN;
2516 		if (reset)
2517 			val &= ~DPIO_PCS_CLK_SOFT_RESET;
2518 		else
2519 			val |= DPIO_PCS_CLK_SOFT_RESET;
2520 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2521 	}
2522 }
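
/*
 * Call pattern for the soft reset (a condensed sketch; in this file the
 * assert happens in chv_dp_pre_pll_enable()/chv_post_disable_dp() and
 * the deassert in chv_pre_enable_dp(), all under sb_lock):
 *
 *	mutex_lock(&dev_priv->sb_lock);
 *	chv_data_lane_soft_reset(encoder, true);	// assert reset
 *	... reprogram PHY: staggering, clock channels, ...
 *	chv_data_lane_soft_reset(encoder, false);	// deassert reset
 *	mutex_unlock(&dev_priv->sb_lock);
 */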
2523 
2524 static void chv_post_disable_dp(struct intel_encoder *encoder)
2525 {
2526 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2527 	struct drm_device *dev = encoder->base.dev;
2528 	struct drm_i915_private *dev_priv = dev->dev_private;
2529 
2530 	intel_dp_link_down(intel_dp);
2531 
2532 	mutex_lock(&dev_priv->sb_lock);
2533 
2534 	/* Assert data lane reset */
2535 	chv_data_lane_soft_reset(encoder, true);
2536 
2537 	mutex_unlock(&dev_priv->sb_lock);
2538 }
2539 
2540 static void
2541 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2542 			 uint32_t *DP,
2543 			 uint8_t dp_train_pat)
2544 {
2545 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2546 	struct drm_device *dev = intel_dig_port->base.base.dev;
2547 	struct drm_i915_private *dev_priv = dev->dev_private;
2548 	enum port port = intel_dig_port->port;
2549 
2550 	if (HAS_DDI(dev)) {
2551 		uint32_t temp = I915_READ(DP_TP_CTL(port));
2552 
2553 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2554 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2555 		else
2556 			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2557 
2558 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2559 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2560 		case DP_TRAINING_PATTERN_DISABLE:
2561 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2562 
2563 			break;
2564 		case DP_TRAINING_PATTERN_1:
2565 			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2566 			break;
2567 		case DP_TRAINING_PATTERN_2:
2568 			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2569 			break;
2570 		case DP_TRAINING_PATTERN_3:
2571 			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2572 			break;
2573 		}
2574 		I915_WRITE(DP_TP_CTL(port), temp);
2575 
2576 	} else if ((IS_GEN7(dev) && port == PORT_A) ||
2577 		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
2578 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2579 
2580 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2581 		case DP_TRAINING_PATTERN_DISABLE:
2582 			*DP |= DP_LINK_TRAIN_OFF_CPT;
2583 			break;
2584 		case DP_TRAINING_PATTERN_1:
2585 			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2586 			break;
2587 		case DP_TRAINING_PATTERN_2:
2588 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2589 			break;
2590 		case DP_TRAINING_PATTERN_3:
2591 			DRM_ERROR("DP training pattern 3 not supported\n");
2592 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2593 			break;
2594 		}
2595 
2596 	} else {
2597 		if (IS_CHERRYVIEW(dev))
2598 			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
2599 		else
2600 			*DP &= ~DP_LINK_TRAIN_MASK;
2601 
2602 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2603 		case DP_TRAINING_PATTERN_DISABLE:
2604 			*DP |= DP_LINK_TRAIN_OFF;
2605 			break;
2606 		case DP_TRAINING_PATTERN_1:
2607 			*DP |= DP_LINK_TRAIN_PAT_1;
2608 			break;
2609 		case DP_TRAINING_PATTERN_2:
2610 			*DP |= DP_LINK_TRAIN_PAT_2;
2611 			break;
2612 		case DP_TRAINING_PATTERN_3:
2613 			if (IS_CHERRYVIEW(dev)) {
2614 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
2615 			} else {
2616 				DRM_ERROR("DP training pattern 3 not supported\n");
2617 				*DP |= DP_LINK_TRAIN_PAT_2;
2618 			}
2619 			break;
2620 		}
2621 	}
2622 }
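
/*
 * Expected calling sequence (a sketch; the real flow lives in
 * intel_dp_start_link_train()/intel_dp_stop_link_train()):
 *
 *	_intel_dp_set_link_train(intel_dp, &DP, DP_TRAINING_PATTERN_1);
 *	... clock recovery: adjust vswing/pre-emphasis until CR locks ...
 *	_intel_dp_set_link_train(intel_dp, &DP, DP_TRAINING_PATTERN_2);
 *	... channel equalization ...
 *	_intel_dp_set_link_train(intel_dp, &DP, DP_TRAINING_PATTERN_DISABLE);
 *
 * On DDI platforms the pattern is written to DP_TP_CTL directly; on the
 * others only *DP is updated and the caller must still write
 * intel_dp->output_reg (as intel_dp_enable_port() does below).
 */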
2623 
2624 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2625 {
2626 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2627 	struct drm_i915_private *dev_priv = dev->dev_private;
2628 	struct intel_crtc *crtc =
2629 		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2630 
2631 	/* enable with pattern 1 (as per spec) */
2632 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2633 				 DP_TRAINING_PATTERN_1);
2634 
2635 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2636 	POSTING_READ(intel_dp->output_reg);
2637 
2638 	/*
2639 	 * Magic for VLV/CHV. We _must_ first set up the register
2640 	 * without actually enabling the port, and then do another
2641 	 * write to enable the port. Otherwise link training will
2642 	 * fail when the power sequencer is freshly used for this port.
2643 	 */
2644 	intel_dp->DP |= DP_PORT_EN;
2645 	if (crtc->config->has_audio)
2646 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2647 
2648 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2649 	POSTING_READ(intel_dp->output_reg);
2650 }
2651 
2652 static void intel_enable_dp(struct intel_encoder *encoder)
2653 {
2654 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2655 	struct drm_device *dev = encoder->base.dev;
2656 	struct drm_i915_private *dev_priv = dev->dev_private;
2657 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2658 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2659 	enum i915_pipe pipe = crtc->pipe;
2660 
2661 	if (WARN_ON(dp_reg & DP_PORT_EN))
2662 		return;
2663 
2664 	pps_lock(intel_dp);
2665 
2666 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
2667 		vlv_init_panel_power_sequencer(intel_dp);
2668 
2669 	intel_dp_enable_port(intel_dp);
2670 
2671 	edp_panel_vdd_on(intel_dp);
2672 	edp_panel_on(intel_dp);
2673 	edp_panel_vdd_off(intel_dp, true);
2674 
2675 	pps_unlock(intel_dp);
2676 
2677 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
2678 		unsigned int lane_mask = 0x0;
2679 
2680 		if (IS_CHERRYVIEW(dev))
2681 			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2682 
2683 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2684 				    lane_mask);
2685 	}
2686 
2687 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2688 	intel_dp_start_link_train(intel_dp);
2689 	intel_dp_stop_link_train(intel_dp);
2690 
2691 	if (crtc->config->has_audio) {
2692 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2693 				 pipe_name(pipe));
2694 		intel_audio_codec_enable(encoder);
2695 	}
2696 }
2697 
2698 static void g4x_enable_dp(struct intel_encoder *encoder)
2699 {
2700 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2701 
2702 	intel_enable_dp(encoder);
2703 	intel_edp_backlight_on(intel_dp);
2704 }
2705 
2706 static void vlv_enable_dp(struct intel_encoder *encoder)
2707 {
2708 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2709 
2710 	intel_edp_backlight_on(intel_dp);
2711 	intel_psr_enable(intel_dp);
2712 }
2713 
2714 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2715 {
2716 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2717 	enum port port = dp_to_dig_port(intel_dp)->port;
2718 
2719 	intel_dp_prepare(encoder);
2720 
2721 	/* Only ilk+ has port A */
2722 	if (port == PORT_A)
2723 		ironlake_edp_pll_on(intel_dp);
2724 }
2725 
2726 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2727 {
2728 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2729 	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2730 	enum i915_pipe pipe = intel_dp->pps_pipe;
2731 	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2732 
2733 	edp_panel_vdd_off_sync(intel_dp);
2734 
2735 	/*
2736 	 * VLV seems to get confused when multiple power sequencers
2737 	 * have the same port selected (even if only one has power/vdd
2738 	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2739 	 * CHV, on the other hand, doesn't seem to mind having the same port
2740 	 * selected in multiple power sequencers, but let's always clear the
2741 	 * port select when logically disconnecting a power sequencer
2742 	 * from a port.
2743 	 */
2744 	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2745 		      pipe_name(pipe), port_name(intel_dig_port->port));
2746 	I915_WRITE(pp_on_reg, 0);
2747 	POSTING_READ(pp_on_reg);
2748 
2749 	intel_dp->pps_pipe = INVALID_PIPE;
2750 }
2751 
2752 static void vlv_steal_power_sequencer(struct drm_device *dev,
2753 				      enum i915_pipe pipe)
2754 {
2755 	struct drm_i915_private *dev_priv = dev->dev_private;
2756 	struct intel_encoder *encoder;
2757 
2758 	lockdep_assert_held(&dev_priv->pps_mutex);
2759 
2760 	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2761 		return;
2762 
2763 	for_each_intel_encoder(dev, encoder) {
2764 		struct intel_dp *intel_dp;
2765 		enum port port;
2766 
2767 		if (encoder->type != INTEL_OUTPUT_EDP)
2768 			continue;
2769 
2770 		intel_dp = enc_to_intel_dp(&encoder->base);
2771 		port = dp_to_dig_port(intel_dp)->port;
2772 
2773 		if (intel_dp->pps_pipe != pipe)
2774 			continue;
2775 
2776 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2777 			      pipe_name(pipe), port_name(port));
2778 
2779 		WARN(encoder->base.crtc,
2780 		     "stealing pipe %c power sequencer from active eDP port %c\n",
2781 		     pipe_name(pipe), port_name(port));
2782 
2783 		/* make sure vdd is off before we steal it */
2784 		vlv_detach_power_sequencer(intel_dp);
2785 	}
2786 }
2787 
2788 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2789 {
2790 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2791 	struct intel_encoder *encoder = &intel_dig_port->base;
2792 	struct drm_device *dev = encoder->base.dev;
2793 	struct drm_i915_private *dev_priv = dev->dev_private;
2794 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2795 
2796 	lockdep_assert_held(&dev_priv->pps_mutex);
2797 
2798 	if (!is_edp(intel_dp))
2799 		return;
2800 
2801 	if (intel_dp->pps_pipe == crtc->pipe)
2802 		return;
2803 
2804 	/*
2805 	 * If another power sequencer was being used on this
2806 	 * port previously make sure to turn off vdd there while
2807 	 * we still have control of it.
2808 	 */
2809 	if (intel_dp->pps_pipe != INVALID_PIPE)
2810 		vlv_detach_power_sequencer(intel_dp);
2811 
2812 	/*
2813 	 * We may be stealing the power
2814 	 * sequencer from another port.
2815 	 */
2816 	vlv_steal_power_sequencer(dev, crtc->pipe);
2817 
2818 	/* now it's all ours */
2819 	intel_dp->pps_pipe = crtc->pipe;
2820 
2821 	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2822 		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2823 
2824 	/* init power sequencer on this pipe and port */
2825 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2826 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2827 }
2828 
2829 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2830 {
2831 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2832 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2833 	struct drm_device *dev = encoder->base.dev;
2834 	struct drm_i915_private *dev_priv = dev->dev_private;
2835 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2836 	enum dpio_channel port = vlv_dport_to_channel(dport);
2837 	int pipe = intel_crtc->pipe;
2838 	u32 val;
2839 
2840 	mutex_lock(&dev_priv->sb_lock);
2841 
2842 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2843 	val = 0;	/* note: the value just read is discarded; val is rebuilt */
2844 	if (pipe)
2845 		val |= (1<<21);
2846 	else
2847 		val &= ~(1<<21);
2848 	val |= 0x001000c4;
2849 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2850 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2851 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2852 
2853 	mutex_unlock(&dev_priv->sb_lock);
2854 
2855 	intel_enable_dp(encoder);
2856 }
2857 
2858 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2859 {
2860 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2861 	struct drm_device *dev = encoder->base.dev;
2862 	struct drm_i915_private *dev_priv = dev->dev_private;
2863 	struct intel_crtc *intel_crtc =
2864 		to_intel_crtc(encoder->base.crtc);
2865 	enum dpio_channel port = vlv_dport_to_channel(dport);
2866 	int pipe = intel_crtc->pipe;
2867 
2868 	intel_dp_prepare(encoder);
2869 
2870 	/* Program Tx lane resets to default */
2871 	mutex_lock(&dev_priv->sb_lock);
2872 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2873 			 DPIO_PCS_TX_LANE2_RESET |
2874 			 DPIO_PCS_TX_LANE1_RESET);
2875 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2876 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2877 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2878 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2879 				 DPIO_PCS_CLK_SOFT_RESET);
2880 
2881 	/* Fix up inter-pair skew failure */
2882 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2883 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2884 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2885 	mutex_unlock(&dev_priv->sb_lock);
2886 }
2887 
2888 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2889 {
2890 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2891 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2892 	struct drm_device *dev = encoder->base.dev;
2893 	struct drm_i915_private *dev_priv = dev->dev_private;
2894 	struct intel_crtc *intel_crtc =
2895 		to_intel_crtc(encoder->base.crtc);
2896 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2897 	int pipe = intel_crtc->pipe;
2898 	int data, i, stagger;
2899 	u32 val;
2900 
2901 	mutex_lock(&dev_priv->sb_lock);
2902 
2903 	/* allow hardware to manage TX FIFO reset source */
2904 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2905 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2906 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2907 
2908 	if (intel_crtc->config->lane_count > 2) {
2909 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2910 		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2911 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2912 	}
2913 
2914 	/* Program Tx lane latency optimal setting */
2915 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
2916 		/* Set the upar bit */
2917 		if (intel_crtc->config->lane_count == 1)
2918 			data = 0x0;
2919 		else
2920 			data = (i == 1) ? 0x0 : 0x1;
2921 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2922 				data << DPIO_UPAR_SHIFT);
2923 	}
2924 
2925 	/* Data lane stagger programming */
2926 	if (intel_crtc->config->port_clock > 270000)
2927 		stagger = 0x18;
2928 	else if (intel_crtc->config->port_clock > 135000)
2929 		stagger = 0xd;
2930 	else if (intel_crtc->config->port_clock > 67500)
2931 		stagger = 0x7;
2932 	else if (intel_crtc->config->port_clock > 33750)
2933 		stagger = 0x4;
2934 	else
2935 		stagger = 0x2;
2936 
2937 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2938 	val |= DPIO_TX2_STAGGER_MASK(0x1f);
2939 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2940 
2941 	if (intel_crtc->config->lane_count > 2) {
2942 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2943 		val |= DPIO_TX2_STAGGER_MASK(0x1f);
2944 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2945 	}
2946 
2947 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2948 		       DPIO_LANESTAGGER_STRAP(stagger) |
2949 		       DPIO_LANESTAGGER_STRAP_OVRD |
2950 		       DPIO_TX1_STAGGER_MASK(0x1f) |
2951 		       DPIO_TX1_STAGGER_MULT(6) |
2952 		       DPIO_TX2_STAGGER_MULT(0));
2953 
2954 	if (intel_crtc->config->lane_count > 2) {
2955 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2956 			       DPIO_LANESTAGGER_STRAP(stagger) |
2957 			       DPIO_LANESTAGGER_STRAP_OVRD |
2958 			       DPIO_TX1_STAGGER_MASK(0x1f) |
2959 			       DPIO_TX1_STAGGER_MULT(7) |
2960 			       DPIO_TX2_STAGGER_MULT(5));
2961 	}
2962 
2963 	/* Deassert data lane reset */
2964 	chv_data_lane_soft_reset(encoder, false);
2965 
2966 	mutex_unlock(&dev_priv->sb_lock);
2967 
2968 	intel_enable_dp(encoder);
2969 
2970 	/* Second common lane will stay alive on its own now */
2971 	if (dport->release_cl2_override) {
2972 		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2973 		dport->release_cl2_override = false;
2974 	}
2975 }
2976 
2977 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2978 {
2979 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2980 	struct drm_device *dev = encoder->base.dev;
2981 	struct drm_i915_private *dev_priv = dev->dev_private;
2982 	struct intel_crtc *intel_crtc =
2983 		to_intel_crtc(encoder->base.crtc);
2984 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2985 	enum i915_pipe pipe = intel_crtc->pipe;
2986 	unsigned int lane_mask =
2987 		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2988 	u32 val;
2989 
2990 	intel_dp_prepare(encoder);
2991 
2992 	/*
2993 	 * Must trick the second common lane into life.
2994 	 * Otherwise we can't even access the PLL.
2995 	 */
2996 	if (ch == DPIO_CH0 && pipe == PIPE_B)
2997 		dport->release_cl2_override =
2998 			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2999 
3000 	chv_phy_powergate_lanes(encoder, true, lane_mask);
3001 
3002 	mutex_lock(&dev_priv->sb_lock);
3003 
3004 	/* Assert data lane reset */
3005 	chv_data_lane_soft_reset(encoder, true);
3006 
3007 	/* program left/right clock distribution */
3008 	if (pipe != PIPE_B) {
3009 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3010 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3011 		if (ch == DPIO_CH0)
3012 			val |= CHV_BUFLEFTENA1_FORCE;
3013 		if (ch == DPIO_CH1)
3014 			val |= CHV_BUFRIGHTENA1_FORCE;
3015 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3016 	} else {
3017 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3018 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3019 		if (ch == DPIO_CH0)
3020 			val |= CHV_BUFLEFTENA2_FORCE;
3021 		if (ch == DPIO_CH1)
3022 			val |= CHV_BUFRIGHTENA2_FORCE;
3023 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3024 	}
3025 
3026 	/* program clock channel usage */
3027 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3028 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3029 	if (pipe != PIPE_B)
3030 		val &= ~CHV_PCS_USEDCLKCHANNEL;
3031 	else
3032 		val |= CHV_PCS_USEDCLKCHANNEL;
3033 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3034 
3035 	if (intel_crtc->config->lane_count > 2) {
3036 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3037 		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3038 		if (pipe != PIPE_B)
3039 			val &= ~CHV_PCS_USEDCLKCHANNEL;
3040 		else
3041 			val |= CHV_PCS_USEDCLKCHANNEL;
3042 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3043 	}
3044 
3045 	/*
3046 	 * This is a bit weird since generally CL
3047 	 * matches the pipe, but here we need to
3048 	 * pick the CL based on the port.
3049 	 */
3050 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3051 	if (pipe != PIPE_B)
3052 		val &= ~CHV_CMN_USEDCLKCHANNEL;
3053 	else
3054 		val |= CHV_CMN_USEDCLKCHANNEL;
3055 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3056 
3057 	mutex_unlock(&dev_priv->sb_lock);
3058 }
3059 
3060 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3061 {
3062 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3063 	enum i915_pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3064 	u32 val;
3065 
3066 	mutex_lock(&dev_priv->sb_lock);
3067 
3068 	/* disable left/right clock distribution */
3069 	if (pipe != PIPE_B) {
3070 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3071 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3072 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3073 	} else {
3074 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3075 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3076 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3077 	}
3078 
3079 	mutex_unlock(&dev_priv->sb_lock);
3080 
3081 	/*
3082 	 * Leave the power down bit cleared for at least one
3083 	 * lane so that chv_phy_powergate_ch() will power
3084 	 * on something when the channel is otherwise unused.
3085 	 * When the port is off and the override is removed
3086 	 * the lanes power down anyway, so otherwise it doesn't
3087 	 * really matter what the state of power down bits is
3088 	 * after this.
3089 	 */
3090 	chv_phy_powergate_lanes(encoder, false, 0x0);
3091 }
3092 
3093 /*
3094  * Fetch AUX CH registers 0x202 - 0x207 which contain
3095  * link status information
3096  */
3097 bool
3098 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3099 {
3100 	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3101 				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3102 }
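
/*
 * Typical use together with the generic drm_dp_helper decoders (a
 * sketch; lane_count stands in for the configured lane count):
 *
 *	uint8_t link_status[DP_LINK_STATUS_SIZE];
 *
 *	if (!intel_dp_get_link_status(intel_dp, link_status))
 *		return;		// AUX read failed
 *	if (!drm_dp_clock_recovery_ok(link_status, lane_count) ||
 *	    !drm_dp_channel_eq_ok(link_status, lane_count))
 *		... retrain the link ...
 */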
3103 
3104 /* These are source-specific values. */
3105 uint8_t
3106 intel_dp_voltage_max(struct intel_dp *intel_dp)
3107 {
3108 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3109 	struct drm_i915_private *dev_priv = dev->dev_private;
3110 	enum port port = dp_to_dig_port(intel_dp)->port;
3111 
3112 	if (IS_BROXTON(dev))
3113 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3114 	else if (INTEL_INFO(dev)->gen >= 9) {
3115 		if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
3116 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3117 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3118 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
3119 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3120 	else if (IS_GEN7(dev) && port == PORT_A)
3121 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3122 	else if (HAS_PCH_CPT(dev) && port != PORT_A)
3123 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3124 	else
3125 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3126 }
3127 
3128 uint8_t
3129 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3130 {
3131 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3132 	enum port port = dp_to_dig_port(intel_dp)->port;
3133 
3134 	if (INTEL_INFO(dev)->gen >= 9) {
3135 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3136 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3137 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3138 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3139 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3140 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3141 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3142 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3143 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3144 		default:
3145 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3146 		}
3147 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3148 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3149 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3150 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3151 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3152 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3153 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3154 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3155 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3156 		default:
3157 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3158 		}
3159 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3160 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3161 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3162 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3163 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3164 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3165 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3166 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3167 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3168 		default:
3169 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3170 		}
3171 	} else if (IS_GEN7(dev) && port == PORT_A) {
3172 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3173 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3174 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3175 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3176 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3177 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3178 		default:
3179 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3180 		}
3181 	} else {
3182 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3183 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3184 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3185 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3186 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3187 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3188 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3189 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3190 		default:
3191 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3192 		}
3193 	}
3194 }
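
/*
 * These two helpers bound what the sink may request during training.
 * The generic clamp (a sketch of how the link-training code applies
 * them) marks a clamped value as "maximum reached":
 *
 *	uint8_t max = intel_dp_voltage_max(intel_dp);
 *	if (vswing >= max)
 *		vswing = max | DP_TRAIN_MAX_SWING_REACHED;
 *
 * and similarly for pre-emphasis via intel_dp_pre_emphasis_max() and
 * DP_TRAIN_MAX_PRE_EMPHASIS_REACHED.
 */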
3195 
3196 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3197 {
3198 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3199 	struct drm_i915_private *dev_priv = dev->dev_private;
3200 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3201 	struct intel_crtc *intel_crtc =
3202 		to_intel_crtc(dport->base.base.crtc);
3203 	unsigned long demph_reg_value, preemph_reg_value,
3204 		uniqtranscale_reg_value;
3205 	uint8_t train_set = intel_dp->train_set[0];
3206 	enum dpio_channel port = vlv_dport_to_channel(dport);
3207 	int pipe = intel_crtc->pipe;
3208 
3209 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3210 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3211 		preemph_reg_value = 0x0004000;
3212 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3213 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3214 			demph_reg_value = 0x2B405555;
3215 			uniqtranscale_reg_value = 0x552AB83A;
3216 			break;
3217 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3218 			demph_reg_value = 0x2B404040;
3219 			uniqtranscale_reg_value = 0x5548B83A;
3220 			break;
3221 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3222 			demph_reg_value = 0x2B245555;
3223 			uniqtranscale_reg_value = 0x5560B83A;
3224 			break;
3225 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3226 			demph_reg_value = 0x2B405555;
3227 			uniqtranscale_reg_value = 0x5598DA3A;
3228 			break;
3229 		default:
3230 			return 0;
3231 		}
3232 		break;
3233 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3234 		preemph_reg_value = 0x0002000;
3235 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3236 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3237 			demph_reg_value = 0x2B404040;
3238 			uniqtranscale_reg_value = 0x5552B83A;
3239 			break;
3240 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3241 			demph_reg_value = 0x2B404848;
3242 			uniqtranscale_reg_value = 0x5580B83A;
3243 			break;
3244 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3245 			demph_reg_value = 0x2B404040;
3246 			uniqtranscale_reg_value = 0x55ADDA3A;
3247 			break;
3248 		default:
3249 			return 0;
3250 		}
3251 		break;
3252 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3253 		preemph_reg_value = 0x0000000;
3254 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3255 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3256 			demph_reg_value = 0x2B305555;
3257 			uniqtranscale_reg_value = 0x5570B83A;
3258 			break;
3259 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3260 			demph_reg_value = 0x2B2B4040;
3261 			uniqtranscale_reg_value = 0x55ADDA3A;
3262 			break;
3263 		default:
3264 			return 0;
3265 		}
3266 		break;
3267 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3268 		preemph_reg_value = 0x0006000;
3269 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3270 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3271 			demph_reg_value = 0x1B405555;
3272 			uniqtranscale_reg_value = 0x55ADDA3A;
3273 			break;
3274 		default:
3275 			return 0;
3276 		}
3277 		break;
3278 	default:
3279 		return 0;
3280 	}
3281 
3282 	mutex_lock(&dev_priv->sb_lock);
3283 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3284 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3285 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3286 			 uniqtranscale_reg_value);
3287 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3288 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3289 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3290 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3291 	mutex_unlock(&dev_priv->sb_lock);
3292 
3293 	return 0;
3294 }
3295 
3296 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3297 {
3298 	return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3299 		(train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3300 }
3301 
3302 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3303 {
3304 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3305 	struct drm_i915_private *dev_priv = dev->dev_private;
3306 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3307 	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3308 	u32 deemph_reg_value, margin_reg_value, val;
3309 	uint8_t train_set = intel_dp->train_set[0];
3310 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3311 	enum i915_pipe pipe = intel_crtc->pipe;
3312 	int i;
3313 
3314 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3315 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3316 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3317 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3318 			deemph_reg_value = 128;
3319 			margin_reg_value = 52;
3320 			break;
3321 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3322 			deemph_reg_value = 128;
3323 			margin_reg_value = 77;
3324 			break;
3325 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3326 			deemph_reg_value = 128;
3327 			margin_reg_value = 102;
3328 			break;
3329 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3330 			deemph_reg_value = 128;
3331 			margin_reg_value = 154;
3332 			/* FIXME extra to set for 1200 */
3333 			break;
3334 		default:
3335 			return 0;
3336 		}
3337 		break;
3338 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3339 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3340 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3341 			deemph_reg_value = 85;
3342 			margin_reg_value = 78;
3343 			break;
3344 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3345 			deemph_reg_value = 85;
3346 			margin_reg_value = 116;
3347 			break;
3348 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3349 			deemph_reg_value = 85;
3350 			margin_reg_value = 154;
3351 			break;
3352 		default:
3353 			return 0;
3354 		}
3355 		break;
3356 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3357 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3358 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3359 			deemph_reg_value = 64;
3360 			margin_reg_value = 104;
3361 			break;
3362 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3363 			deemph_reg_value = 64;
3364 			margin_reg_value = 154;
3365 			break;
3366 		default:
3367 			return 0;
3368 		}
3369 		break;
3370 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3371 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3372 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3373 			deemph_reg_value = 43;
3374 			margin_reg_value = 154;
3375 			break;
3376 		default:
3377 			return 0;
3378 		}
3379 		break;
3380 	default:
3381 		return 0;
3382 	}
3383 
3384 	mutex_lock(&dev_priv->sb_lock);
3385 
3386 	/* Clear calc init */
3387 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3388 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3389 	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3390 	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3391 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3392 
3393 	if (intel_crtc->config->lane_count > 2) {
3394 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3395 		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3396 		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3397 		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3398 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3399 	}
3400 
3401 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3402 	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3403 	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3404 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3405 
3406 	if (intel_crtc->config->lane_count > 2) {
3407 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3408 		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3409 		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3410 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3411 	}
3412 
3413 	/* Program swing deemph */
3414 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3415 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3416 		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3417 		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3418 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3419 	}
3420 
3421 	/* Program swing margin */
3422 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3423 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3424 
3425 		val &= ~DPIO_SWING_MARGIN000_MASK;
3426 		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3427 
3428 		/*
3429 		 * Supposedly this value shouldn't matter when unique transition
3430 		 * scale is disabled, but in fact it does matter. Let's just
3431 		 * always program the same value and hope it's OK.
3432 		 */
3433 		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3434 		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3435 
3436 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3437 	}
3438 
3439 	/*
3440 	 * The document says to set bit 27 for ch0 and bit 26 for ch1,
3441 	 * which might be a typo in the doc.
3442 	 * For now, for this unique transition scale selection, set bit
3443 	 * 27 for both ch0 and ch1.
3444 	 */
3445 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3446 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3447 		if (chv_need_uniq_trans_scale(train_set))
3448 			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3449 		else
3450 			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3451 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3452 	}
3453 
3454 	/* Start swing calculation */
3455 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3456 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3457 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3458 
3459 	if (intel_crtc->config->lane_count > 2) {
3460 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3461 		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3462 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3463 	}
3464 
3465 	mutex_unlock(&dev_priv->sb_lock);
3466 
3467 	return 0;
3468 }
3469 
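/* Gen4's DP voltage swing and pre-emphasis control */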
3470 static uint32_t
3471 gen4_signal_levels(uint8_t train_set)
3472 {
3473 	uint32_t	signal_levels = 0;
3474 
3475 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3476 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3477 	default:
3478 		signal_levels |= DP_VOLTAGE_0_4;
3479 		break;
3480 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3481 		signal_levels |= DP_VOLTAGE_0_6;
3482 		break;
3483 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3484 		signal_levels |= DP_VOLTAGE_0_8;
3485 		break;
3486 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3487 		signal_levels |= DP_VOLTAGE_1_2;
3488 		break;
3489 	}
3490 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3491 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3492 	default:
3493 		signal_levels |= DP_PRE_EMPHASIS_0;
3494 		break;
3495 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3496 		signal_levels |= DP_PRE_EMPHASIS_3_5;
3497 		break;
3498 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3499 		signal_levels |= DP_PRE_EMPHASIS_6;
3500 		break;
3501 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3502 		signal_levels |= DP_PRE_EMPHASIS_9_5;
3503 		break;
3504 	}
3505 	return signal_levels;
3506 }
3507 
3508 /* Gen6's DP voltage swing and pre-emphasis control */
3509 static uint32_t
3510 gen6_edp_signal_levels(uint8_t train_set)
3511 {
3512 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3513 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3514 	switch (signal_levels) {
3515 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3516 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3517 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3518 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3519 		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3520 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3521 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3522 		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3523 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3524 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3525 		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3526 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3527 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3528 		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3529 	default:
3530 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3531 			      "0x%x\n", signal_levels);
3532 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3533 	}
3534 }
3535 
3536 /* Gen7's DP voltage swing and pre-emphasis control */
3537 static uint32_t
3538 gen7_edp_signal_levels(uint8_t train_set)
3539 {
3540 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3541 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3542 	switch (signal_levels) {
3543 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3544 		return EDP_LINK_TRAIN_400MV_0DB_IVB;
3545 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3546 		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3547 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3548 		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3549 
3550 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3551 		return EDP_LINK_TRAIN_600MV_0DB_IVB;
3552 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3553 		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3554 
3555 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3556 		return EDP_LINK_TRAIN_800MV_0DB_IVB;
3557 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3558 		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3559 
3560 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3561 			      "0x%x\n", signal_levels);
3562 			      "0x%x\n", signal_levels);
3563 		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3564 	}
3565 }
3566 
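/*
 * Translate the voltage swing and pre-emphasis from train_set[0] into
 * the platform-specific register encoding and write it to the port.
 */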
3567 void
3568 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3569 {
3570 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3571 	enum port port = intel_dig_port->port;
3572 	struct drm_device *dev = intel_dig_port->base.base.dev;
3573 	struct drm_i915_private *dev_priv = to_i915(dev);
3574 	uint32_t signal_levels, mask = 0;
3575 	uint8_t train_set = intel_dp->train_set[0];
3576 
3577 	if (HAS_DDI(dev)) {
3578 		signal_levels = ddi_signal_levels(intel_dp);
3579 
3580 		if (IS_BROXTON(dev))
3581 			signal_levels = 0;
3582 		else
3583 			mask = DDI_BUF_EMP_MASK;
3584 	} else if (IS_CHERRYVIEW(dev)) {
3585 		signal_levels = chv_signal_levels(intel_dp);
3586 	} else if (IS_VALLEYVIEW(dev)) {
3587 		signal_levels = vlv_signal_levels(intel_dp);
3588 	} else if (IS_GEN7(dev) && port == PORT_A) {
3589 		signal_levels = gen7_edp_signal_levels(train_set);
3590 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3591 	} else if (IS_GEN6(dev) && port == PORT_A) {
3592 		signal_levels = gen6_edp_signal_levels(train_set);
3593 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3594 	} else {
3595 		signal_levels = gen4_signal_levels(train_set);
3596 		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3597 	}
3598 
3599 	if (mask)
3600 		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3601 
3602 	DRM_DEBUG_KMS("Using vswing level %d\n",
3603 		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3604 	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3605 		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3606 			DP_TRAIN_PRE_EMPHASIS_SHIFT);
3607 
3608 	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3609 
3610 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3611 	POSTING_READ(intel_dp->output_reg);
3612 }
3613 
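/*
 * Update the link training pattern bits in intel_dp->DP and write the
 * result to the port register.
 */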
3614 void
3615 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3616 				       uint8_t dp_train_pat)
3617 {
3618 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3619 	struct drm_i915_private *dev_priv =
3620 		to_i915(intel_dig_port->base.base.dev);
3621 
3622 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3623 
3624 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3625 	POSTING_READ(intel_dp->output_reg);
3626 }
3627 
3628 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3629 {
3630 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3631 	struct drm_device *dev = intel_dig_port->base.base.dev;
3632 	struct drm_i915_private *dev_priv = dev->dev_private;
3633 	enum port port = intel_dig_port->port;
3634 	uint32_t val;
3635 
3636 	if (!HAS_DDI(dev))
3637 		return;
3638 
3639 	val = I915_READ(DP_TP_CTL(port));
3640 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3641 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3642 	I915_WRITE(DP_TP_CTL(port), val);
3643 
3644 	/*
3645 	 * On PORT_A we can only have eDP in SST mode. There the only reason
3646 	 * we need to set idle transmission mode is to work around a HW issue
3647 	 * where we enable the pipe while not in idle link-training mode.
3648 	 * In this case we are required to wait for a minimum number of
3649 	 * idle patterns to be sent.
3650 	 */
3651 	if (port == PORT_A)
3652 		return;
3653 
3654 	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3655 		     1))
3656 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3657 }
3658 
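/*
 * Turn the DP port off: switch to the idle training pattern, disable
 * the port and audio output, and apply the IBX transcoder A workaround
 * where needed.
 */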
3659 static void
3660 intel_dp_link_down(struct intel_dp *intel_dp)
3661 {
3662 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3663 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3664 	enum port port = intel_dig_port->port;
3665 	struct drm_device *dev = intel_dig_port->base.base.dev;
3666 	struct drm_i915_private *dev_priv = dev->dev_private;
3667 	uint32_t DP = intel_dp->DP;
3668 
3669 	if (WARN_ON(HAS_DDI(dev)))
3670 		return;
3671 
3672 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3673 		return;
3674 
3675 	DRM_DEBUG_KMS("\n");
3676 
3677 	if ((IS_GEN7(dev) && port == PORT_A) ||
3678 	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
3679 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3680 		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3681 	} else {
3682 		if (IS_CHERRYVIEW(dev))
3683 			DP &= ~DP_LINK_TRAIN_MASK_CHV;
3684 		else
3685 			DP &= ~DP_LINK_TRAIN_MASK;
3686 		DP |= DP_LINK_TRAIN_PAT_IDLE;
3687 	}
3688 	I915_WRITE(intel_dp->output_reg, DP);
3689 	POSTING_READ(intel_dp->output_reg);
3690 
3691 	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3692 	I915_WRITE(intel_dp->output_reg, DP);
3693 	POSTING_READ(intel_dp->output_reg);
3694 
3695 	/*
3696 	 * HW workaround for IBX, we need to move the port
3697 	 * to transcoder A after disabling it to allow the
3698 	 * matching HDMI port to be enabled on transcoder A.
3699 	 */
3700 	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3701 		/*
3702 		 * We get CPU/PCH FIFO underruns on the other pipe when
3703 		 * doing the workaround. Sweep them under the rug.
3704 		 */
3705 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3706 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3707 
3708 		/* always enable with pattern 1 (as per spec) */
3709 		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3710 		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3711 		I915_WRITE(intel_dp->output_reg, DP);
3712 		POSTING_READ(intel_dp->output_reg);
3713 
3714 		DP &= ~DP_PORT_EN;
3715 		I915_WRITE(intel_dp->output_reg, DP);
3716 		POSTING_READ(intel_dp->output_reg);
3717 
3718 		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3719 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3720 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3721 	}
3722 
3723 	msleep(intel_dp->panel_power_down_delay);
3724 
3725 	intel_dp->DP = DP;
3726 }
3727 
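/*
 * Read the sink's DPCD receiver capabilities, sink count, PSR/PSR2
 * support, eDP 1.4 link rates and downstream port info. Returns false
 * when the sink should be treated as disconnected.
 */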
3728 static bool
3729 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3730 {
3731 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3732 	struct drm_device *dev = dig_port->base.base.dev;
3733 	struct drm_i915_private *dev_priv = dev->dev_private;
3734 	uint8_t rev;
3735 
3736 	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3737 			     sizeof(intel_dp->dpcd)) < 0)
3738 		return false; /* aux transfer failed */
3739 
3740 #ifdef __DragonFly__
3741 	char dpcd_hex_dump[DP_RECEIVER_CAP_SIZE * 3];
3742 	DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(intel_dp->dpcd, sizeof(intel_dp->dpcd),
3743 		      dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
3744 #else
3745 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3746 #endif
3747 
3748 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3749 		return false; /* DPCD not present */
3750 
3751 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
3752 			     &intel_dp->sink_count, 1) < 0)
3753 		return false;
3754 
3755 	/*
3756 	 * The sink count can change between short pulse HPD interrupts,
3757 	 * hence a member variable in intel_dp tracks any changes
3758 	 * between them.
3759 	 */
3760 	intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
3761 
3762 	/*
3763 	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
3764 	 * a dongle is present but no display. Unless we need to know
3765 	 * whether a dongle is present, we don't have to update the
3766 	 * downstream port information, so an early return here saves
3767 	 * time by skipping operations that are not required.
3768 	 */
3769 	if (!is_edp(intel_dp) && !intel_dp->sink_count)
3770 		return false;
3771 
3772 	/* Check if the panel supports PSR */
3773 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3774 	if (is_edp(intel_dp)) {
3775 		drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
3776 				 intel_dp->psr_dpcd,
3777 				 sizeof(intel_dp->psr_dpcd));
3778 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3779 			dev_priv->psr.sink_support = true;
3780 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3781 		}
3782 
3783 		if (INTEL_INFO(dev)->gen >= 9 &&
3784 			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3785 			uint8_t frame_sync_cap;
3786 
3787 			dev_priv->psr.sink_support = true;
3788 			drm_dp_dpcd_read(&intel_dp->aux,
3789 					 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3790 					 &frame_sync_cap, 1);
3791 			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3792 			/* PSR2 needs frame sync as well */
3793 			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3794 			DRM_DEBUG_KMS("PSR2 %s on sink\n",
3795 				dev_priv->psr.psr2_support ? "supported" : "not supported");
3796 		}
3797 	}
3798 
3799 	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3800 		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
3801 		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3802 
3803 	/* Intermediate frequency support */
3804 	if (is_edp(intel_dp) &&
3805 	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3806 	    (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3807 	    (rev >= 0x03)) { /* eDP v1.4 or higher */
3808 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3809 		int i;
3810 
3811 		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
3812 				sink_rates, sizeof(sink_rates));
3813 
3814 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3815 			int val = le16_to_cpu(sink_rates[i]);
3816 
3817 			if (val == 0)
3818 				break;
3819 
3820 			/* DPCD value is the per-lane link rate in 200 kHz units; convert to the symbol clock in kHz used by drm */
3821 			intel_dp->sink_rates[i] = (val * 200) / 10;
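			/* e.g. 2.7 Gbps reads as 13500, and 13500 * 200 / 10 = 270000 */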
3822 		}
3823 		intel_dp->num_sink_rates = i;
3824 	}
3825 
3826 	intel_dp_print_rates(intel_dp);
3827 
3828 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3829 	      DP_DWN_STRM_PORT_PRESENT))
3830 		return true; /* native DP sink */
3831 
3832 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3833 		return true; /* no per-port downstream info */
3834 
3835 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3836 			     intel_dp->downstream_ports,
3837 			     DP_MAX_DOWNSTREAM_PORTS) < 0)
3838 		return false; /* downstream port status fetch failed */
3839 
3840 	return true;
3841 }
3842 
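/* Log the sink and branch device OUIs for debugging, if the sink supports them. */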
3843 static void
3844 intel_dp_probe_oui(struct intel_dp *intel_dp)
3845 {
3846 	u8 buf[3];
3847 
3848 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3849 		return;
3850 
3851 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3852 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3853 			      buf[0], buf[1], buf[2]);
3854 
3855 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3856 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3857 			      buf[0], buf[1], buf[2]);
3858 }
3859 
3860 static bool
3861 intel_dp_probe_mst(struct intel_dp *intel_dp)
3862 {
3863 	u8 buf[1];
3864 
3865 	if (!i915.enable_dp_mst)
3866 		return false;
3867 
3868 	if (!intel_dp->can_mst)
3869 		return false;
3870 
3871 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3872 		return false;
3873 
3874 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) {
3875 		if (buf[0] & DP_MST_CAP) {
3876 			DRM_DEBUG_KMS("Sink is MST capable\n");
3877 			intel_dp->is_mst = true;
3878 		} else {
3879 			DRM_DEBUG_KMS("Sink is not MST capable\n");
3880 			intel_dp->is_mst = false;
3881 		}
3882 	}
3883 
3884 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3885 	return intel_dp->is_mst;
3886 }
3887 
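/*
 * Stop the sink CRC calculation and wait for the sink's test counter
 * to drain, re-enabling IPS on the way out.
 */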
3888 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3889 {
3890 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3891 	struct drm_device *dev = dig_port->base.base.dev;
3892 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3893 	u8 buf;
3894 	int ret = 0;
3895 	int count = 0;
3896 	int attempts = 10;
3897 
3898 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3899 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3900 		ret = -EIO;
3901 		goto out;
3902 	}
3903 
3904 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3905 			       buf & ~DP_TEST_SINK_START) < 0) {
3906 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3907 		ret = -EIO;
3908 		goto out;
3909 	}
3910 
3911 	do {
3912 		intel_wait_for_vblank(dev, intel_crtc->pipe);
3913 
3914 		if (drm_dp_dpcd_readb(&intel_dp->aux,
3915 				      DP_TEST_SINK_MISC, &buf) < 0) {
3916 			ret = -EIO;
3917 			goto out;
3918 		}
3919 		count = buf & DP_TEST_COUNT_MASK;
3920 	} while (--attempts && count);
3921 
3922 	if (attempts == 0) {
3923 		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
3924 		ret = -ETIMEDOUT;
3925 	}
3926 
3927  out:
3928 	hsw_enable_ips(intel_crtc);
3929 	return ret;
3930 }
3931 
3932 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3933 {
3934 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3935 	struct drm_device *dev = dig_port->base.base.dev;
3936 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3937 	u8 buf;
3938 	int ret;
3939 
3940 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3941 		return -EIO;
3942 
3943 	if (!(buf & DP_TEST_CRC_SUPPORTED))
3944 		return -ENOTTY;
3945 
3946 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3947 		return -EIO;
3948 
3949 	if (buf & DP_TEST_SINK_START) {
3950 		ret = intel_dp_sink_crc_stop(intel_dp);
3951 		if (ret)
3952 			return ret;
3953 	}
3954 
3955 	hsw_disable_ips(intel_crtc);
3956 
3957 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3958 			       buf | DP_TEST_SINK_START) < 0) {
3959 		hsw_enable_ips(intel_crtc);
3960 		return -EIO;
3961 	}
3962 
3963 	intel_wait_for_vblank(dev, intel_crtc->pipe);
3964 	return 0;
3965 }
3966 
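/*
 * Read a CRC calculated by the sink: start the sink CRC test, wait up
 * to 6 vblanks for a CRC to become available, then fetch the 6 bytes
 * from DP_TEST_CRC_R_CR.
 */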
3967 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3968 {
3969 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3970 	struct drm_device *dev = dig_port->base.base.dev;
3971 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3972 	u8 buf;
3973 	int count, ret;
3974 	int attempts = 6;
3975 
3976 	ret = intel_dp_sink_crc_start(intel_dp);
3977 	if (ret)
3978 		return ret;
3979 
3980 	do {
3981 		intel_wait_for_vblank(dev, intel_crtc->pipe);
3982 
3983 		if (drm_dp_dpcd_readb(&intel_dp->aux,
3984 				      DP_TEST_SINK_MISC, &buf) < 0) {
3985 			ret = -EIO;
3986 			goto stop;
3987 		}
3988 		count = buf & DP_TEST_COUNT_MASK;
3989 
3990 	} while (--attempts && count == 0);
3991 
3992 	if (attempts == 0) {
3993 		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3994 		ret = -ETIMEDOUT;
3995 		goto stop;
3996 	}
3997 
3998 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3999 		ret = -EIO;
4000 		goto stop;
4001 	}
4002 
4003 stop:
4004 	intel_dp_sink_crc_stop(intel_dp);
4005 	return ret;
4006 }
4007 
4008 static bool
4009 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4010 {
4011 	return drm_dp_dpcd_read(&intel_dp->aux,
4012 				DP_DEVICE_SERVICE_IRQ_VECTOR,
4013 				sink_irq_vector, 1) == 1;
4014 }
4015 
4016 static bool
4017 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4018 {
4019 	int ret;
4020 
4021 	ret = drm_dp_dpcd_read(&intel_dp->aux,
4022 			       DP_SINK_COUNT_ESI,
4023 			       sink_irq_vector, 14);
4024 	if (ret != 14)
4025 		return false;
4026 
4027 	return true;
4028 }
4029 
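/*
 * DP compliance automated test handlers. Only the EDID read test is
 * implemented here; link training is blindly ACKed and the video and
 * PHY pattern tests are NAKed.
 */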
4030 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4031 {
4032 	uint8_t test_result = DP_TEST_ACK;
4033 	return test_result;
4034 }
4035 
4036 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4037 {
4038 	uint8_t test_result = DP_TEST_NAK;
4039 	return test_result;
4040 }
4041 
4042 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4043 {
4044 	uint8_t test_result = DP_TEST_NAK;
4045 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4046 	struct drm_connector *connector = &intel_connector->base;
4047 
4048 	if (intel_connector->detect_edid == NULL ||
4049 	    connector->edid_corrupt ||
4050 	    intel_dp->aux.i2c_defer_count > 6) {
4051 		/* Check EDID read for NACKs, DEFERs and corruption
4052 		 * (DP CTS 1.2 Core r1.1)
4053 		 *    4.2.2.4 : Failed EDID read, I2C_NAK
4054 		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
4055 		 *    4.2.2.6 : EDID corruption detected
4056 		 * Use failsafe mode for all cases
4057 		 */
4058 		if (intel_dp->aux.i2c_nack_count > 0 ||
4059 			intel_dp->aux.i2c_defer_count > 0)
4060 			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4061 				      intel_dp->aux.i2c_nack_count,
4062 				      intel_dp->aux.i2c_defer_count);
4063 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4064 	} else {
4065 		struct edid *block = intel_connector->detect_edid;
4066 
4067 		/* We have to write the checksum
4068 		 * of the last block read
4069 		 */
4070 		block += intel_connector->detect_edid->extensions;
4071 
4072 		if (drm_dp_dpcd_write(&intel_dp->aux,
4073 					DP_TEST_EDID_CHECKSUM,
4074 					&block->checksum,
4075 					1) <= 0)
4076 			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4077 
4078 		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4079 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4080 	}
4081 
4082 	/* Set test active flag here so userspace doesn't interrupt things */
4083 	intel_dp->compliance_test_active = 1;
4084 
4085 	return test_result;
4086 }
4087 
4088 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4089 {
4090 	uint8_t test_result = DP_TEST_NAK;
4091 	return test_result;
4092 }
4093 
4094 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4095 {
4096 	uint8_t response = DP_TEST_NAK;
4097 	uint8_t rxdata = 0;
4098 	int status = 0;
4099 
4100 	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4101 	if (status <= 0) {
4102 		DRM_DEBUG_KMS("Could not read test request from sink\n");
4103 		goto update_status;
4104 	}
4105 
4106 	switch (rxdata) {
4107 	case DP_TEST_LINK_TRAINING:
4108 		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4109 		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4110 		response = intel_dp_autotest_link_training(intel_dp);
4111 		break;
4112 	case DP_TEST_LINK_VIDEO_PATTERN:
4113 		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4114 		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4115 		response = intel_dp_autotest_video_pattern(intel_dp);
4116 		break;
4117 	case DP_TEST_LINK_EDID_READ:
4118 		DRM_DEBUG_KMS("EDID test requested\n");
4119 		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4120 		response = intel_dp_autotest_edid(intel_dp);
4121 		break;
4122 	case DP_TEST_LINK_PHY_TEST_PATTERN:
4123 		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4124 		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4125 		response = intel_dp_autotest_phy_pattern(intel_dp);
4126 		break;
4127 	default:
4128 		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4129 		break;
4130 	}
4131 
4132 update_status:
4133 	status = drm_dp_dpcd_write(&intel_dp->aux,
4134 				   DP_TEST_RESPONSE,
4135 				   &response, 1);
4136 	if (status <= 0)
4137 		DRM_DEBUG_KMS("Could not write test response to sink\n");
4138 }
4139 
4140 static int
4141 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4142 {
4143 	bool bret;
4144 
4145 	if (intel_dp->is_mst) {
4146 		u8 esi[16] = { 0 };
4147 		int ret = 0;
4148 		int retry;
4149 		bool handled;
4150 		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4151 go_again:
4152 		if (bret) {
4153 
4154 			/* check link status - esi[10] = 0x200c */
4155 			if (intel_dp->active_mst_links &&
4156 			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4157 				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4158 				intel_dp_start_link_train(intel_dp);
4159 				intel_dp_stop_link_train(intel_dp);
4160 			}
4161 
4162 			DRM_DEBUG_KMS("got esi %3ph\n", esi);
4163 			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4164 
4165 			if (handled) {
4166 				for (retry = 0; retry < 3; retry++) {
4167 					int wret;
4168 					wret = drm_dp_dpcd_write(&intel_dp->aux,
4169 								 DP_SINK_COUNT_ESI+1,
4170 								 &esi[1], 3);
4171 					if (wret == 3) {
4172 						break;
4173 					}
4174 				}
4175 
4176 				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4177 				if (bret) {
4178 					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4179 					goto go_again;
4180 				}
4181 			} else
4182 				ret = 0;
4183 
4184 			return ret;
4185 		} else {
4186 			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4187 			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4188 			intel_dp->is_mst = false;
4189 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4190 			/* send a hotplug event */
4191 			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4192 		}
4193 	}
4194 	return -EINVAL;
4195 }
4196 
4197 static void
4198 intel_dp_check_link_status(struct intel_dp *intel_dp)
4199 {
4200 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4201 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4202 	u8 link_status[DP_LINK_STATUS_SIZE];
4203 
4204 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4205 
4206 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
4207 		DRM_ERROR("Failed to get link status\n");
4208 		return;
4209 	}
4210 
4211 	if (!intel_encoder->base.crtc)
4212 		return;
4213 
4214 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4215 		return;
4216 
4217 	/* if link training is requested we should perform it always */
4218 	/* if link training is requested we should always perform it */
4219 	    (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4220 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4221 			      intel_encoder->base.name);
4222 		intel_dp_start_link_train(intel_dp);
4223 		intel_dp_stop_link_train(intel_dp);
4224 	}
4225 }
4226 
4227 /*
4228  * According to DP spec
4229  * 5.1.2:
4230  *  1. Read DPCD
4231  *  2. Configure link according to Receiver Capabilities
4232  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4233  *  4. Check link status on receipt of hot-plug interrupt
4234  *
4235  * intel_dp_short_pulse -  handles short pulse interrupts
4236  * when full detection is not required.
4237  * Returns %true if short pulse is handled and full detection
4238  * is NOT required and %false otherwise.
4239  */
4240 static bool
4241 intel_dp_short_pulse(struct intel_dp *intel_dp)
4242 {
4243 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4244 	u8 sink_irq_vector;
4245 	u8 old_sink_count = intel_dp->sink_count;
4246 	bool ret;
4247 
4248 	/*
4249 	 * Clear the compliance test variables to allow capturing
4250 	 * the values for the next automated test request.
4251 	 */
4252 	intel_dp->compliance_test_active = 0;
4253 	intel_dp->compliance_test_type = 0;
4254 	intel_dp->compliance_test_data = 0;
4255 
4256 	/*
4257 	 * Now read the DPCD to see if it's actually running.
4258 	 * If the current value of the sink count doesn't match the
4259 	 * value that was stored earlier, or the DPCD read failed,
4260 	 * we need to do full detection.
4261 	 */
4262 	ret = intel_dp_get_dpcd(intel_dp);
4263 
4264 	if ((old_sink_count != intel_dp->sink_count) || !ret) {
4265 		/* No need to proceed if we are going to do full detect */
4266 		return false;
4267 	}
4268 
4269 	/* Try to read the source of the interrupt */
4270 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4271 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4272 		/* Clear interrupt source */
4273 		drm_dp_dpcd_writeb(&intel_dp->aux,
4274 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4275 				   sink_irq_vector);
4276 
4277 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4278 			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4279 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4280 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4281 	}
4282 
4283 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4284 	intel_dp_check_link_status(intel_dp);
4285 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
4286 
4287 	return true;
4288 }
4289 
4290 /* XXX this is probably wrong for multiple downstream ports */
4291 static enum drm_connector_status
4292 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4293 {
4294 	uint8_t *dpcd = intel_dp->dpcd;
4295 	uint8_t type;
4296 
4297 	if (!intel_dp_get_dpcd(intel_dp))
4298 		return connector_status_disconnected;
4299 
4300 	if (is_edp(intel_dp))
4301 		return connector_status_connected;
4302 
4303 	/* if there's no downstream port, we're done */
4304 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4305 		return connector_status_connected;
4306 
4307 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4308 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4309 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4310 
4311 		return intel_dp->sink_count ?
4312 			connector_status_connected : connector_status_disconnected;
4313 	}
4314 
4315 	/* If no HPD, poke DDC gently */
4316 	if (drm_probe_ddc(&intel_dp->aux.ddc))
4317 		return connector_status_connected;
4318 
4319 	/* Well we tried, say unknown for unreliable port types */
4320 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4321 		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4322 		if (type == DP_DS_PORT_TYPE_VGA ||
4323 		    type == DP_DS_PORT_TYPE_NON_EDID)
4324 			return connector_status_unknown;
4325 	} else {
4326 		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4327 			DP_DWN_STRM_PORT_TYPE_MASK;
4328 		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4329 		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
4330 			return connector_status_unknown;
4331 	}
4332 
4333 	/* Anything else is out of spec, warn and ignore */
4334 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4335 	return connector_status_disconnected;
4336 }
4337 
4338 static enum drm_connector_status
4339 edp_detect(struct intel_dp *intel_dp)
4340 {
4341 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4342 	enum drm_connector_status status;
4343 
4344 	status = intel_panel_detect(dev);
4345 	if (status == connector_status_unknown)
4346 		status = connector_status_connected;
4347 
4348 	return status;
4349 }
4350 
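/*
 * Per-platform live status checks: each variant reads the hotplug bit
 * for the given port from the appropriate ISR or status register.
 */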
4351 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4352 				       struct intel_digital_port *port)
4353 {
4354 	u32 bit;
4355 
4356 	switch (port->port) {
4357 	case PORT_A:
4358 		return true;
4359 	case PORT_B:
4360 		bit = SDE_PORTB_HOTPLUG;
4361 		break;
4362 	case PORT_C:
4363 		bit = SDE_PORTC_HOTPLUG;
4364 		break;
4365 	case PORT_D:
4366 		bit = SDE_PORTD_HOTPLUG;
4367 		break;
4368 	default:
4369 		MISSING_CASE(port->port);
4370 		return false;
4371 	}
4372 
4373 	return I915_READ(SDEISR) & bit;
4374 }
4375 
4376 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4377 				       struct intel_digital_port *port)
4378 {
4379 	u32 bit;
4380 
4381 	switch (port->port) {
4382 	case PORT_A:
4383 		return true;
4384 	case PORT_B:
4385 		bit = SDE_PORTB_HOTPLUG_CPT;
4386 		break;
4387 	case PORT_C:
4388 		bit = SDE_PORTC_HOTPLUG_CPT;
4389 		break;
4390 	case PORT_D:
4391 		bit = SDE_PORTD_HOTPLUG_CPT;
4392 		break;
4393 	case PORT_E:
4394 		bit = SDE_PORTE_HOTPLUG_SPT;
4395 		break;
4396 	default:
4397 		MISSING_CASE(port->port);
4398 		return false;
4399 	}
4400 
4401 	return I915_READ(SDEISR) & bit;
4402 }
4403 
4404 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4405 				       struct intel_digital_port *port)
4406 {
4407 	u32 bit;
4408 
4409 	switch (port->port) {
4410 	case PORT_B:
4411 		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4412 		break;
4413 	case PORT_C:
4414 		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4415 		break;
4416 	case PORT_D:
4417 		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4418 		break;
4419 	default:
4420 		MISSING_CASE(port->port);
4421 		return false;
4422 	}
4423 
4424 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4425 }
4426 
4427 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4428 					struct intel_digital_port *port)
4429 {
4430 	u32 bit;
4431 
4432 	switch (port->port) {
4433 	case PORT_B:
4434 		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4435 		break;
4436 	case PORT_C:
4437 		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4438 		break;
4439 	case PORT_D:
4440 		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4441 		break;
4442 	default:
4443 		MISSING_CASE(port->port);
4444 		return false;
4445 	}
4446 
4447 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4448 }
4449 
4450 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4451 				       struct intel_digital_port *intel_dig_port)
4452 {
4453 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4454 	enum port port;
4455 	u32 bit;
4456 
4457 	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4458 	switch (port) {
4459 	case PORT_A:
4460 		bit = BXT_DE_PORT_HP_DDIA;
4461 		break;
4462 	case PORT_B:
4463 		bit = BXT_DE_PORT_HP_DDIB;
4464 		break;
4465 	case PORT_C:
4466 		bit = BXT_DE_PORT_HP_DDIC;
4467 		break;
4468 	default:
4469 		MISSING_CASE(port);
4470 		return false;
4471 	}
4472 
4473 	return I915_READ(GEN8_DE_PORT_ISR) & bit;
4474 }
4475 
4476 /*
4477  * intel_digital_port_connected - is the specified port connected?
4478  * @dev_priv: i915 private structure
4479  * @port: the port to test
4480  *
4481  * Return %true if @port is connected, %false otherwise.
4482  */
4483 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4484 					 struct intel_digital_port *port)
4485 {
4486 	if (HAS_PCH_IBX(dev_priv))
4487 		return ibx_digital_port_connected(dev_priv, port);
4488 	else if (HAS_PCH_SPLIT(dev_priv))
4489 		return cpt_digital_port_connected(dev_priv, port);
4490 	else if (IS_BROXTON(dev_priv))
4491 		return bxt_digital_port_connected(dev_priv, port);
4492 	else if (IS_GM45(dev_priv))
4493 		return gm45_digital_port_connected(dev_priv, port);
4494 	else
4495 		return g4x_digital_port_connected(dev_priv, port);
4496 }
4497 
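/*
 * Get an EDID for the connector: duplicate the cached EDID if we have
 * a valid one, otherwise probe it over the DDC channel.
 */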
4498 static struct edid *
4499 intel_dp_get_edid(struct intel_dp *intel_dp)
4500 {
4501 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4502 
4503 	/* use cached edid if we have one */
4504 	if (intel_connector->edid) {
4505 		/* invalid edid */
4506 		if (IS_ERR(intel_connector->edid))
4507 			return NULL;
4508 
4509 		return drm_edid_duplicate(intel_connector->edid);
4510 	} else
4511 		return drm_get_edid(&intel_connector->base,
4512 				    &intel_dp->aux.ddc);
4513 }
4514 
4515 static void
4516 intel_dp_set_edid(struct intel_dp *intel_dp)
4517 {
4518 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4519 	struct edid *edid;
4520 
4521 	intel_dp_unset_edid(intel_dp);
4522 	edid = intel_dp_get_edid(intel_dp);
4523 	intel_connector->detect_edid = edid;
4524 
4525 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4526 		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4527 	else
4528 		intel_dp->has_audio = drm_detect_monitor_audio(edid);
4529 }
4530 
4531 static void
4532 intel_dp_unset_edid(struct intel_dp *intel_dp)
4533 {
4534 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4535 
4536 	kfree(intel_connector->detect_edid);
4537 	intel_connector->detect_edid = NULL;
4538 
4539 	intel_dp->has_audio = false;
4540 }
4541 
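/*
 * Full detection for a long HPD pulse: work out the connection status,
 * re-read the DPCD, probe OUI and MST support, check the link if the
 * display was already connected, and cache the EDID and handle any
 * pending sink IRQs.
 */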
4542 static void
4543 intel_dp_long_pulse(struct intel_connector *intel_connector)
4544 {
4545 	struct drm_connector *connector = &intel_connector->base;
4546 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4547 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4548 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4549 	struct drm_device *dev = connector->dev;
4550 	enum drm_connector_status status;
4551 	enum intel_display_power_domain power_domain;
4552 	bool ret;
4553 	u8 sink_irq_vector;
4554 
4555 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
4556 	intel_display_power_get(to_i915(dev), power_domain);
4557 
4558 	/* Can't disconnect eDP, but you can close the lid... */
4559 	if (is_edp(intel_dp))
4560 		status = edp_detect(intel_dp);
4561 	else if (intel_digital_port_connected(to_i915(dev),
4562 					      dp_to_dig_port(intel_dp)))
4563 		status = intel_dp_detect_dpcd(intel_dp);
4564 	else
4565 		status = connector_status_disconnected;
4566 
4567 	if (status != connector_status_connected) {
4568 		intel_dp->compliance_test_active = 0;
4569 		intel_dp->compliance_test_type = 0;
4570 		intel_dp->compliance_test_data = 0;
4571 
4572 		if (intel_dp->is_mst) {
4573 			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
4574 				      intel_dp->is_mst,
4575 				      intel_dp->mst_mgr.mst_state);
4576 			intel_dp->is_mst = false;
4577 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4578 							intel_dp->is_mst);
4579 		}
4580 
4581 		goto out;
4582 	}
4583 
4584 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4585 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4586 
4587 	intel_dp_probe_oui(intel_dp);
4588 
4589 	ret = intel_dp_probe_mst(intel_dp);
4590 	if (ret) {
4591 		/*
4592 		 * If we are in MST mode then this connector
4593 		 * won't appear connected or have any
4594 		 * EDID on it
4595 		 */
4596 		status = connector_status_disconnected;
4597 		goto out;
4598 	} else if (connector->status == connector_status_connected) {
4599 		/*
4600 		 * If the display was already connected and is still connected,
4601 		 * check the link status; there have been known issues of
4602 		 * link loss triggering a long pulse.
4603 		 */
4604 		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4605 		intel_dp_check_link_status(intel_dp);
4606 		drm_modeset_unlock(&dev->mode_config.connection_mutex);
4607 		goto out;
4608 	}
4609 
4610 	/*
4611 	 * Clear the NACK and defer counts to get their exact values
4612 	 * for the EDID read, as required by Compliance tests
4613 	 * 4.2.2.4 and 4.2.2.5.
4614 	 */
4615 	intel_dp->aux.i2c_nack_count = 0;
4616 	intel_dp->aux.i2c_defer_count = 0;
4617 
4618 	intel_dp_set_edid(intel_dp);
4619 
4620 	status = connector_status_connected;
4621 	intel_dp->detect_done = true;
4622 
4623 	/* Try to read the source of the interrupt */
4624 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4625 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4626 		/* Clear interrupt source */
4627 		drm_dp_dpcd_writeb(&intel_dp->aux,
4628 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4629 				   sink_irq_vector);
4630 
4631 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4632 			intel_dp_handle_test_request(intel_dp);
4633 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4634 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4635 	}
4636 
4637 out:
4638 	if ((status != connector_status_connected) &&
4639 	    !intel_dp->is_mst)
4640 		intel_dp_unset_edid(intel_dp);
4641 
4642 	intel_display_power_put(to_i915(dev), power_domain);
4643 	return;
4644 }
4645 
4646 static enum drm_connector_status
4647 intel_dp_detect(struct drm_connector *connector, bool force)
4648 {
4649 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4650 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4651 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4652 	struct intel_connector *intel_connector = to_intel_connector(connector);
4653 
4654 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4655 		      connector->base.id, connector->name);
4656 
4657 	if (intel_dp->is_mst) {
4658 		/* MST devices are disconnected from a monitor POV */
4659 		intel_dp_unset_edid(intel_dp);
4660 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4661 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4662 		return connector_status_disconnected;
4663 	}
4664 
4665 	/* If full detect is not performed yet, do a full detect */
4666 	if (!intel_dp->detect_done)
4667 		intel_dp_long_pulse(intel_dp->attached_connector);
4668 
4669 	intel_dp->detect_done = false;
4670 
4671 	if (is_edp(intel_dp) || intel_connector->detect_edid)
4672 		return connector_status_connected;
4673 	else
4674 		return connector_status_disconnected;
4675 }
4676 
4677 static void
4678 intel_dp_force(struct drm_connector *connector)
4679 {
4680 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4681 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4682 	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4683 	enum intel_display_power_domain power_domain;
4684 
4685 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4686 		      connector->base.id, connector->name);
4687 	intel_dp_unset_edid(intel_dp);
4688 
4689 	if (connector->status != connector_status_connected)
4690 		return;
4691 
4692 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
4693 	intel_display_power_get(dev_priv, power_domain);
4694 
4695 	intel_dp_set_edid(intel_dp);
4696 
4697 	intel_display_power_put(dev_priv, power_domain);
4698 
4699 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4700 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4701 }
4702 
4703 static int intel_dp_get_modes(struct drm_connector *connector)
4704 {
4705 	struct intel_connector *intel_connector = to_intel_connector(connector);
4706 	struct edid *edid;
4707 
4708 	edid = intel_connector->detect_edid;
4709 	if (edid) {
4710 		int ret = intel_connector_update_modes(connector, edid);
4711 		if (ret)
4712 			return ret;
4713 	}
4714 
4715 	/* if eDP has no EDID, fall back to fixed mode */
4716 	if (is_edp(intel_attached_dp(connector)) &&
4717 	    intel_connector->panel.fixed_mode) {
4718 		struct drm_display_mode *mode;
4719 
4720 		mode = drm_mode_duplicate(connector->dev,
4721 					  intel_connector->panel.fixed_mode);
4722 		if (mode) {
4723 			drm_mode_probed_add(connector, mode);
4724 			return 1;
4725 		}
4726 	}
4727 
4728 	return 0;
4729 }
4730 
4731 static bool
4732 intel_dp_detect_audio(struct drm_connector *connector)
4733 {
4734 	bool has_audio = false;
4735 	struct edid *edid;
4736 
4737 	edid = to_intel_connector(connector)->detect_edid;
4738 	if (edid)
4739 		has_audio = drm_detect_monitor_audio(edid);
4740 
4741 	return has_audio;
4742 }
4743 
4744 static int
4745 intel_dp_set_property(struct drm_connector *connector,
4746 		      struct drm_property *property,
4747 		      uint64_t val)
4748 {
4749 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4750 	struct intel_connector *intel_connector = to_intel_connector(connector);
4751 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4752 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4753 	int ret;
4754 
4755 	ret = drm_object_property_set_value(&connector->base, property, val);
4756 	if (ret)
4757 		return ret;
4758 
4759 	if (property == dev_priv->force_audio_property) {
4760 		int i = val;
4761 		bool has_audio;
4762 
4763 		if (i == intel_dp->force_audio)
4764 			return 0;
4765 
4766 		intel_dp->force_audio = i;
4767 
4768 		if (i == HDMI_AUDIO_AUTO)
4769 			has_audio = intel_dp_detect_audio(connector);
4770 		else
4771 			has_audio = (i == HDMI_AUDIO_ON);
4772 
4773 		if (has_audio == intel_dp->has_audio)
4774 			return 0;
4775 
4776 		intel_dp->has_audio = has_audio;
4777 		goto done;
4778 	}
4779 
4780 	if (property == dev_priv->broadcast_rgb_property) {
4781 		bool old_auto = intel_dp->color_range_auto;
4782 		bool old_range = intel_dp->limited_color_range;
4783 
4784 		switch (val) {
4785 		case INTEL_BROADCAST_RGB_AUTO:
4786 			intel_dp->color_range_auto = true;
4787 			break;
4788 		case INTEL_BROADCAST_RGB_FULL:
4789 			intel_dp->color_range_auto = false;
4790 			intel_dp->limited_color_range = false;
4791 			break;
4792 		case INTEL_BROADCAST_RGB_LIMITED:
4793 			intel_dp->color_range_auto = false;
4794 			intel_dp->limited_color_range = true;
4795 			break;
4796 		default:
4797 			return -EINVAL;
4798 		}
4799 
4800 		if (old_auto == intel_dp->color_range_auto &&
4801 		    old_range == intel_dp->limited_color_range)
4802 			return 0;
4803 
4804 		goto done;
4805 	}
4806 
4807 	if (is_edp(intel_dp) &&
4808 	    property == connector->dev->mode_config.scaling_mode_property) {
4809 		if (val == DRM_MODE_SCALE_NONE) {
4810 			DRM_DEBUG_KMS("no scaling not supported\n");
4811 			return -EINVAL;
4812 		}
4813 		if (HAS_GMCH_DISPLAY(dev_priv) &&
4814 		    val == DRM_MODE_SCALE_CENTER) {
4815 			DRM_DEBUG_KMS("centering not supported\n");
4816 			return -EINVAL;
4817 		}
4818 
4819 		if (intel_connector->panel.fitting_mode == val) {
4820 			/* the eDP scaling property is not changed */
4821 			return 0;
4822 		}
4823 		intel_connector->panel.fitting_mode = val;
4824 
4825 		goto done;
4826 	}
4827 
4828 	return -EINVAL;
4829 
4830 done:
4831 	if (intel_encoder->base.crtc)
4832 		intel_crtc_restore_mode(intel_encoder->base.crtc);
4833 
4834 	return 0;
4835 }
4836 
4837 static void
4838 intel_dp_connector_destroy(struct drm_connector *connector)
4839 {
4840 	struct intel_connector *intel_connector = to_intel_connector(connector);
4841 
4842 	kfree(intel_connector->detect_edid);
4843 
4844 	if (!IS_ERR_OR_NULL(intel_connector->edid))
4845 		kfree(intel_connector->edid);
4846 
4847 	/* Can't call is_edp() since the encoder may have been destroyed
4848 	 * already. */
4849 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4850 		intel_panel_fini(&intel_connector->panel);
4851 
4852 	drm_connector_cleanup(connector);
4853 	kfree(connector);
4854 }
4855 
4856 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4857 {
4858 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4859 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4860 
4861 	intel_dp_mst_encoder_cleanup(intel_dig_port);
4862 	if (is_edp(intel_dp)) {
4863 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4864 		/*
4865 		 * vdd might still be enabled due to the delayed vdd off.
4866 		 * Make sure vdd is actually turned off here.
4867 		 */
4868 		pps_lock(intel_dp);
4869 		edp_panel_vdd_off_sync(intel_dp);
4870 		pps_unlock(intel_dp);
4871 
4872 #if 0
4873 		if (intel_dp->edp_notifier.notifier_call) {
4874 			unregister_reboot_notifier(&intel_dp->edp_notifier);
4875 			intel_dp->edp_notifier.notifier_call = NULL;
4876 		}
4877 #endif
4878 	}
4879 	drm_encoder_cleanup(encoder);
4880 	kfree(intel_dig_port);
4881 }
4882 
4883 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4884 {
4885 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4886 
4887 	if (!is_edp(intel_dp))
4888 		return;
4889 
4890 	/*
4891 	 * vdd might still be enabled due to the delayed vdd off.
4892 	 * Make sure vdd is actually turned off here.
4893 	 */
4894 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4895 	pps_lock(intel_dp);
4896 	edp_panel_vdd_off_sync(intel_dp);
4897 	pps_unlock(intel_dp);
4898 }
4899 
4900 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4901 {
4902 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4903 	struct drm_device *dev = intel_dig_port->base.base.dev;
4904 	struct drm_i915_private *dev_priv = dev->dev_private;
4905 	enum intel_display_power_domain power_domain;
4906 
4907 	lockdep_assert_held(&dev_priv->pps_mutex);
4908 
4909 	if (!edp_have_panel_vdd(intel_dp))
4910 		return;
4911 
4912 	/*
4913 	 * The VDD bit needs a power domain reference, so if the bit is
4914 	 * already enabled when we boot or resume, grab this reference and
4915 	 * schedule a vdd off, so we don't hold on to the reference
4916 	 * indefinitely.
4917 	 */
4918 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4919 	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
4920 	intel_display_power_get(dev_priv, power_domain);
4921 
4922 	edp_panel_vdd_schedule_off(intel_dp);
4923 }
4924 
4925 void intel_dp_encoder_reset(struct drm_encoder *encoder)
4926 {
4927 	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
4928 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4929 
4930 	if (!HAS_DDI(dev_priv))
4931 		intel_dp->DP = I915_READ(intel_dp->output_reg);
4932 
4933 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4934 		return;
4935 
4936 	pps_lock(intel_dp);
4937 
4938 	/*
4939 	 * Read out the current power sequencer assignment,
4940 	 * in case the BIOS did something with it.
4941 	 */
4942 	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
4943 		vlv_initial_power_sequencer_setup(intel_dp);
4944 
4945 	intel_edp_panel_vdd_sanitize(intel_dp);
4946 
4947 	pps_unlock(intel_dp);
4948 }
4949 
4950 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4951 	.dpms = drm_atomic_helper_connector_dpms,
4952 	.detect = intel_dp_detect,
4953 	.force = intel_dp_force,
4954 	.fill_modes = drm_helper_probe_single_connector_modes,
4955 	.set_property = intel_dp_set_property,
4956 	.atomic_get_property = intel_connector_atomic_get_property,
4957 	.destroy = intel_dp_connector_destroy,
4958 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4959 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4960 };
4961 
4962 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4963 	.get_modes = intel_dp_get_modes,
4964 	.mode_valid = intel_dp_mode_valid,
4965 	.best_encoder = intel_best_encoder,
4966 };
4967 
4968 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4969 	.reset = intel_dp_encoder_reset,
4970 	.destroy = intel_dp_encoder_destroy,
4971 };
4972 
4973 enum irqreturn
4974 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4975 {
4976 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4977 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4978 	struct drm_device *dev = intel_dig_port->base.base.dev;
4979 	struct drm_i915_private *dev_priv = dev->dev_private;
4980 	enum intel_display_power_domain power_domain;
4981 	enum irqreturn ret = IRQ_NONE;
4982 
4983 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
4984 	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
4985 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4986 
4987 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4988 		/*
4989 		 * vdd off can generate a long pulse on eDP which
4990 		 * would require vdd on to handle it, and thus we
4991 		 * would end up in an endless cycle of
4992 		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4993 		 */
4994 		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4995 			      port_name(intel_dig_port->port));
4996 		return IRQ_HANDLED;
4997 	}
4998 
4999 	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5000 		      port_name(intel_dig_port->port),
5001 		      long_hpd ? "long" : "short");
5002 
5003 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
5004 	intel_display_power_get(dev_priv, power_domain);
5005 
5006 	if (long_hpd) {
5007 		intel_dp_long_pulse(intel_dp->attached_connector);
5008 		if (intel_dp->is_mst)
5009 			ret = IRQ_HANDLED;
5010 		goto put_power;
5011 
5012 	} else {
5013 		if (intel_dp->is_mst) {
5014 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
5015 				/*
5016 				 * If we were in MST mode, and device is not
5017 				 * there, get out of MST mode
5018 				 */
5019 				DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5020 					      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5021 				intel_dp->is_mst = false;
5022 				drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5023 								intel_dp->is_mst);
5024 				goto put_power;
5025 			}
5026 		}
5027 
5028 		if (!intel_dp->is_mst) {
5029 			if (!intel_dp_short_pulse(intel_dp)) {
5030 				intel_dp_long_pulse(intel_dp->attached_connector);
5031 				goto put_power;
5032 			}
5033 		}
5034 	}
5035 
5036 	ret = IRQ_HANDLED;
5037 
5038 put_power:
5039 	intel_display_power_put(dev_priv, power_domain);
5040 
5041 	return ret;
5042 }
5043 
5044 /* check the VBT to see whether the eDP is on another port */
5045 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5046 {
5047 	struct drm_i915_private *dev_priv = dev->dev_private;
5048 
5049 	/*
5050 	 * eDP is not supported on g4x, so bail out early just
5051 	 * for a bit of extra safety in case the VBT is bonkers.
5052 	 */
5053 	if (INTEL_INFO(dev)->gen < 5)
5054 		return false;
5055 
5056 	if (port == PORT_A)
5057 		return true;
5058 
5059 	return intel_bios_is_port_edp(dev_priv, port);
5060 }
5061 
5062 void
5063 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5064 {
5065 	struct intel_connector *intel_connector = to_intel_connector(connector);
5066 
5067 	intel_attach_force_audio_property(connector);
5068 	intel_attach_broadcast_rgb_property(connector);
5069 	intel_dp->color_range_auto = true;
5070 
5071 	if (is_edp(intel_dp)) {
5072 		drm_mode_create_scaling_mode_property(connector->dev);
5073 		drm_object_attach_property(
5074 			&connector->base,
5075 			connector->dev->mode_config.scaling_mode_property,
5076 			DRM_MODE_SCALE_ASPECT);
5077 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5078 	}
5079 }
5080 
5081 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5082 {
5083 	intel_dp->panel_power_off_time = ktime_get_boottime();
5084 	intel_dp->last_power_on = jiffies;
5085 	intel_dp->last_backlight_off = jiffies;
5086 }
5087 
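/*
 * Determine the panel power sequencing delays from the current
 * register values, the VBT and the eDP spec limits, and cache the
 * result in intel_dp->pps_delays.
 */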
5088 static void
5089 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5090 				    struct intel_dp *intel_dp)
5091 {
5092 	struct drm_i915_private *dev_priv = dev->dev_private;
5093 	struct edp_power_seq cur, vbt, spec,
5094 		*final = &intel_dp->pps_delays;
5095 	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5096 	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5097 
5098 	lockdep_assert_held(&dev_priv->pps_mutex);
5099 
5100 	/* already initialized? */
5101 	if (final->t11_t12 != 0)
5102 		return;
5103 
5104 	if (IS_BROXTON(dev)) {
5105 		/*
5106 		 * TODO: BXT has 2 sets of PPS registers.
5107 		 * The correct register for Broxton needs to be identified
5108 		 * using the VBT; hardcoding for now.
5109 		 */
5110 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5111 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5112 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5113 	} else if (HAS_PCH_SPLIT(dev)) {
5114 		pp_ctrl_reg = PCH_PP_CONTROL;
5115 		pp_on_reg = PCH_PP_ON_DELAYS;
5116 		pp_off_reg = PCH_PP_OFF_DELAYS;
5117 		pp_div_reg = PCH_PP_DIVISOR;
5118 	} else {
5119 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5120 
5121 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5122 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5123 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5124 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5125 	}
5126 
5127 	/* Workaround: Need to write PP_CONTROL with the unlock key as
5128 	 * the very first thing. */
5129 	pp_ctl = ironlake_get_pp_control(intel_dp);
5130 
5131 	pp_on = I915_READ(pp_on_reg);
5132 	pp_off = I915_READ(pp_off_reg);
5133 	if (!IS_BROXTON(dev)) {
5134 		I915_WRITE(pp_ctrl_reg, pp_ctl);
5135 		pp_div = I915_READ(pp_div_reg);
5136 	}
5137 
5138 	/* Pull timing values out of registers */
5139 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5140 		PANEL_POWER_UP_DELAY_SHIFT;
5141 
5142 	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5143 		PANEL_LIGHT_ON_DELAY_SHIFT;
5144 
5145 	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5146 		PANEL_LIGHT_OFF_DELAY_SHIFT;
5147 
5148 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5149 		PANEL_POWER_DOWN_DELAY_SHIFT;
5150 
5151 	if (IS_BROXTON(dev)) {
5152 		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5153 			BXT_POWER_CYCLE_DELAY_SHIFT;
5154 		if (tmp > 0)
5155 			cur.t11_t12 = (tmp - 1) * 1000;
5156 		else
5157 			cur.t11_t12 = 0;
5158 	} else {
5159 		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5160 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5161 	}
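	/*
	 * cur.t11_t12 is now in units of 100 usec, like the other fields;
	 * e.g. a BXT power cycle field value of 6 yields
	 * (6 - 1) * 1000 = 5000, i.e. 500 ms.
	 */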
5162 
5163 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5164 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5165 
5166 	vbt = dev_priv->vbt.edp.pps;
5167 
5168 	/* Upper limits from the eDP 1.3 spec. Note that we use the clunky
5169 	 * units of our hw here, which are all in units of 100 usec. */
5170 	spec.t1_t3 = 210 * 10;
5171 	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5172 	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5173 	spec.t10 = 500 * 10;
5174 	/* This one is special: it is actually in units of 100 ms, but
5175 	 * zero-based in the hw (so we need to add 100 ms). The sw VBT
5176 	 * table multiplies it by 1000 to make it in units of 100 usec,
5177 	 * too. */
5178 	spec.t11_t12 = (510 + 100) * 10;
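	/* That is (510 + 100) * 10 = 6100 units of 100 usec, i.e. 610 ms. */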
5179 
5180 	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5181 		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5182 
5183 	/* Use the max of the register settings and vbt. If both are
5184 	 * unset, fall back to the spec limits. */
5185 #define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
5186 				       spec.field : \
5187 				       max(cur.field, vbt.field))
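	/* E.g. if both cur.t1_t3 and vbt.t1_t3 are zero, the spec limit of
	 * 2100 (210 ms) is used instead. */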
5188 	assign_final(t1_t3);
5189 	assign_final(t8);
5190 	assign_final(t9);
5191 	assign_final(t10);
5192 	assign_final(t11_t12);
5193 #undef assign_final
5194 
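	/* Convert from hw units of 100 usec to ms, rounding up; e.g.
	 * t1_t3 == 2100 becomes a 210 ms panel power-up delay. */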
5195 #define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
5196 	intel_dp->panel_power_up_delay = get_delay(t1_t3);
5197 	intel_dp->backlight_on_delay = get_delay(t8);
5198 	intel_dp->backlight_off_delay = get_delay(t9);
5199 	intel_dp->panel_power_down_delay = get_delay(t10);
5200 	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5201 #undef get_delay
5202 
5203 	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5204 		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5205 		      intel_dp->panel_power_cycle_delay);
5206 
5207 	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5208 		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5209 }
5210 
5211 static void
5212 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5213 					      struct intel_dp *intel_dp)
5214 {
5215 	struct drm_i915_private *dev_priv = dev->dev_private;
5216 	u32 pp_on, pp_off, pp_div, port_sel = 0;
5217 	int div = dev_priv->rawclk_freq / 1000;
5218 	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
5219 	enum port port = dp_to_dig_port(intel_dp)->port;
5220 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
5221 
5222 	lockdep_assert_held(&dev_priv->pps_mutex);
5223 
5224 	if (IS_BROXTON(dev)) {
5225 		/*
5226 		 * TODO: BXT has 2 sets of PPS registers.
5227 		 * The correct register set for Broxton needs to be
5228 		 * identified using the VBT; hardcode set 0 for now.
5229 		 */
5230 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5231 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5232 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5233 
5234 	} else if (HAS_PCH_SPLIT(dev)) {
5235 		pp_on_reg = PCH_PP_ON_DELAYS;
5236 		pp_off_reg = PCH_PP_OFF_DELAYS;
5237 		pp_div_reg = PCH_PP_DIVISOR;
5238 	} else {
5239 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5240 
5241 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5242 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5243 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5244 	}
5245 
5246 	/*
5247 	 * And finally store the new values in the power sequencer. The
5248 	 * backlight delays are set to 1 because we do manual waits on them. For
5249 	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5250 	 * we'll end up waiting for the backlight off delay twice: once when we
5251 	 * do the manual sleep, and once when we disable the panel and wait for
5252 	 * the PP_STATUS bit to become zero.
5253 	 */
5254 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5255 		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5256 	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5257 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5258 	/* Compute the divisor for the pp clock, simply matching the BSpec
5259 	 * formula. */
5260 	if (IS_BROXTON(dev)) {
5261 		pp_div = I915_READ(pp_ctrl_reg);
5262 		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5263 		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5264 				<< BXT_POWER_CYCLE_DELAY_SHIFT);
5265 	} else {
5266 		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5267 		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5268 				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
5269 	}
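	/*
	 * dev_priv->rawclk_freq is in kHz, so div is the raw clock in MHz;
	 * e.g. a 24 MHz raw clock gives div == 24 and a reference divider
	 * field of (100 * 24) / 2 - 1 == 1199.
	 */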
5270 
5271 	/* Haswell doesn't have any port selection bits for the panel
5272 	 * power sequencer any more. */
5273 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5274 		port_sel = PANEL_PORT_SELECT_VLV(port);
5275 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5276 		if (port == PORT_A)
5277 			port_sel = PANEL_PORT_SELECT_DPA;
5278 		else
5279 			port_sel = PANEL_PORT_SELECT_DPD;
5280 	}
5281 
5282 	pp_on |= port_sel;
5283 
5284 	I915_WRITE(pp_on_reg, pp_on);
5285 	I915_WRITE(pp_off_reg, pp_off);
5286 	if (IS_BROXTON(dev))
5287 		I915_WRITE(pp_ctrl_reg, pp_div);
5288 	else
5289 		I915_WRITE(pp_div_reg, pp_div);
5290 
5291 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5292 		      I915_READ(pp_on_reg),
5293 		      I915_READ(pp_off_reg),
5294 		      IS_BROXTON(dev) ?
5295 		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5296 		      I915_READ(pp_div_reg));
5297 }
5298 
5299 /**
5300  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5301  * @dev: DRM device
5302  * @refresh_rate: RR to be programmed
5303  *
5304  * This function is called when the refresh rate (RR) has to be changed from
5305  * one frequency to another. Switches can be between the high and low RR
5306  * supported by the panel, or to any other RR based on media playback (in
5307  * which case the RR value needs to be passed in from user space).
5308  *
5309  * The caller of this function needs to take a lock on dev_priv->drrs.
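 *
 * A minimal calling sketch (hypothetical caller; the rate shown is the
 * panel's downclock mode):
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev, downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);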
5310  */
5311 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5312 {
5313 	struct drm_i915_private *dev_priv = dev->dev_private;
5314 	struct intel_encoder *encoder;
5315 	struct intel_digital_port *dig_port = NULL;
5316 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5317 	struct intel_crtc_state *config = NULL;
5318 	struct intel_crtc *intel_crtc = NULL;
5319 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5320 
5321 	if (refresh_rate <= 0) {
5322 		DRM_DEBUG_KMS("Refresh rate must be positive.\n");
5323 		return;
5324 	}
5325 
5326 	if (intel_dp == NULL) {
5327 		DRM_DEBUG_KMS("DRRS not supported.\n");
5328 		return;
5329 	}
5330 
5331 	/*
5332 	 * FIXME: This needs proper synchronization with psr state for some
5333 	 * platforms that cannot have PSR and DRRS enabled at the same time.
5334 	 */
5335 
5336 	dig_port = dp_to_dig_port(intel_dp);
5337 	encoder = &dig_port->base;
5338 	intel_crtc = to_intel_crtc(encoder->base.crtc);
5339 
5340 	if (!intel_crtc) {
5341 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5342 		return;
5343 	}
5344 
5345 	config = intel_crtc->config;
5346 
5347 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5348 		DRM_DEBUG_KMS("Only seamless DRRS is supported.\n");
5349 		return;
5350 	}
5351 
5352 	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5353 			refresh_rate)
5354 		index = DRRS_LOW_RR;
5355 
5356 	if (index == dev_priv->drrs.refresh_rate_type) {
5357 		DRM_DEBUG_KMS(
5358 			"DRRS requested for previously set RR...ignoring\n");
5359 		return;
5360 	}
5361 
5362 	if (!intel_crtc->active) {
5363 		DRM_DEBUG_KMS("eDP encoder disabled, CRTC not active\n");
5364 		return;
5365 	}
5366 
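	/*
	 * Gen 8+ (except CHV) switches the refresh rate by selecting between
	 * the M1/N1 and M2/N2 link timings computed at modeset; Gen 7 and
	 * VLV/CHV toggle a PIPECONF RR switch bit instead.
	 */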
5367 	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5368 		switch (index) {
5369 		case DRRS_HIGH_RR:
5370 			intel_dp_set_m_n(intel_crtc, M1_N1);
5371 			break;
5372 		case DRRS_LOW_RR:
5373 			intel_dp_set_m_n(intel_crtc, M2_N2);
5374 			break;
5375 		case DRRS_MAX_RR:
5376 		default:
5377 			DRM_ERROR("Unsupported refresh rate type\n");
5378 		}
5379 	} else if (INTEL_INFO(dev)->gen > 6) {
5380 		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5381 		u32 val;
5382 
5383 		val = I915_READ(reg);
5384 		if (index > DRRS_HIGH_RR) {
5385 			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5386 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5387 			else
5388 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5389 		} else {
5390 			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5391 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5392 			else
5393 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5394 		}
5395 		I915_WRITE(reg, val);
5396 	}
5397 
5398 	dev_priv->drrs.refresh_rate_type = index;
5399 
5400 	DRM_DEBUG_KMS("eDP refresh rate set to %d Hz\n", refresh_rate);
5401 }
5402 
5403 /**
5404  * intel_edp_drrs_enable - init DRRS state if supported
5405  * @intel_dp: DP struct
5406  *
5407  * Initializes busy_frontbuffer_bits and drrs.dp.
5408  */
5409 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5410 {
5411 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5412 	struct drm_i915_private *dev_priv = dev->dev_private;
5413 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5414 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5415 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5416 
5417 	if (!intel_crtc->config->has_drrs) {
5418 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5419 		return;
5420 	}
5421 
5422 	mutex_lock(&dev_priv->drrs.mutex);
5423 	if (WARN_ON(dev_priv->drrs.dp)) {
5424 		DRM_ERROR("DRRS already enabled\n");
5425 		goto unlock;
5426 	}
5427 
5428 	dev_priv->drrs.busy_frontbuffer_bits = 0;
5429 
5430 	dev_priv->drrs.dp = intel_dp;
5431 
5432 unlock:
5433 	mutex_unlock(&dev_priv->drrs.mutex);
5434 }
5435 
5436 /**
5437  * intel_edp_drrs_disable - Disable DRRS
5438  * @intel_dp: DP struct
5439  *
5440  */
5441 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5442 {
5443 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5444 	struct drm_i915_private *dev_priv = dev->dev_private;
5445 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5446 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5447 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5448 
5449 	if (!intel_crtc->config->has_drrs)
5450 		return;
5451 
5452 	mutex_lock(&dev_priv->drrs.mutex);
5453 	if (!dev_priv->drrs.dp) {
5454 		mutex_unlock(&dev_priv->drrs.mutex);
5455 		return;
5456 	}
5457 
5458 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5459 		intel_dp_set_drrs_state(dev_priv->dev,
5460 			intel_dp->attached_connector->panel.
5461 			fixed_mode->vrefresh);
5462 
5463 	dev_priv->drrs.dp = NULL;
5464 	mutex_unlock(&dev_priv->drrs.mutex);
5465 
5466 	cancel_delayed_work_sync(&dev_priv->drrs.work);
5467 }
5468 
5469 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5470 {
5471 	struct drm_i915_private *dev_priv =
5472 		container_of(work, typeof(*dev_priv), drrs.work.work);
5473 	struct intel_dp *intel_dp;
5474 
5475 	mutex_lock(&dev_priv->drrs.mutex);
5476 
5477 	intel_dp = dev_priv->drrs.dp;
5478 
5479 	if (!intel_dp)
5480 		goto unlock;
5481 
5482 	/*
5483 	 * The delayed work can race with an invalidate, hence we need to
5484 	 * recheck.
5485 	 */
5486 
5487 	if (dev_priv->drrs.busy_frontbuffer_bits)
5488 		goto unlock;
5489 
5490 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5491 		intel_dp_set_drrs_state(dev_priv->dev,
5492 			intel_dp->attached_connector->panel.
5493 			downclock_mode->vrefresh);
5494 
5495 unlock:
5496 	mutex_unlock(&dev_priv->drrs.mutex);
5497 }
5498 
5499 /**
5500  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5501  * @dev: DRM device
5502  * @frontbuffer_bits: frontbuffer plane tracking bits
5503  *
5504  * This function gets called every time rendering on the given planes starts.
5505  * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
5506  *
5507  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5508  */
5509 void intel_edp_drrs_invalidate(struct drm_device *dev,
5510 		unsigned frontbuffer_bits)
5511 {
5512 	struct drm_i915_private *dev_priv = dev->dev_private;
5513 	struct drm_crtc *crtc;
5514 	enum i915_pipe pipe;
5515 
5516 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5517 		return;
5518 
5519 	cancel_delayed_work(&dev_priv->drrs.work);
5520 
5521 	mutex_lock(&dev_priv->drrs.mutex);
5522 	if (!dev_priv->drrs.dp) {
5523 		mutex_unlock(&dev_priv->drrs.mutex);
5524 		return;
5525 	}
5526 
5527 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5528 	pipe = to_intel_crtc(crtc)->pipe;
5529 
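	/* Only consider frontbuffer bits for the pipe driving the eDP panel. */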
5530 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5531 	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5532 
5533 	/* invalidate means busy screen hence upclock */
5534 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5535 		intel_dp_set_drrs_state(dev_priv->dev,
5536 				dev_priv->drrs.dp->attached_connector->panel.
5537 				fixed_mode->vrefresh);
5538 
5539 	mutex_unlock(&dev_priv->drrs.mutex);
5540 }
5541 
5542 /**
5543  * intel_edp_drrs_flush - Restart Idleness DRRS
5544  * @dev: DRM device
5545  * @frontbuffer_bits: frontbuffer plane tracking bits
5546  *
5547  * This function gets called every time rendering on the given planes has
5548  * completed, or a flip on a crtc has completed. So DRRS should be upclocked
5549  * (LOW_RR -> HIGH_RR), and idleness detection should be restarted if no
5550  * other planes are dirty.
5551  *
5552  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5553  */
5554 void intel_edp_drrs_flush(struct drm_device *dev,
5555 		unsigned frontbuffer_bits)
5556 {
5557 	struct drm_i915_private *dev_priv = dev->dev_private;
5558 	struct drm_crtc *crtc;
5559 	enum i915_pipe pipe;
5560 
5561 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5562 		return;
5563 
5564 	cancel_delayed_work(&dev_priv->drrs.work);
5565 
5566 	mutex_lock(&dev_priv->drrs.mutex);
5567 	if (!dev_priv->drrs.dp) {
5568 		mutex_unlock(&dev_priv->drrs.mutex);
5569 		return;
5570 	}
5571 
5572 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5573 	pipe = to_intel_crtc(crtc)->pipe;
5574 
5575 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5576 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5577 
5578 	/* flush means busy screen hence upclock */
5579 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5580 		intel_dp_set_drrs_state(dev_priv->dev,
5581 				dev_priv->drrs.dp->attached_connector->panel.
5582 				fixed_mode->vrefresh);
5583 
5584 	/*
5585 	 * flush also means no more activity, hence schedule the downclock if
5586 	 * all other fbs are quiescent too
5587 	 */
5588 	if (!dev_priv->drrs.busy_frontbuffer_bits)
5589 		schedule_delayed_work(&dev_priv->drrs.work,
5590 				msecs_to_jiffies(1000));
5591 	mutex_unlock(&dev_priv->drrs.mutex);
5592 }
5593 
5594 /**
5595  * DOC: Display Refresh Rate Switching (DRRS)
5596  *
5597  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5598  * which enables switching between low and high refresh rates,
5599  * dynamically, based on the usage scenario. This feature is applicable
5600  * to internal panels.
5601  *
5602  * Indication that the panel supports DRRS is given by the panel EDID, which
5603  * would list multiple refresh rates for one resolution.
5604  *
5605  * DRRS is of two types: static and seamless.
5606  * Static DRRS involves changing the refresh rate (RR) via a full modeset
5607  * (which may appear as a blink on screen) and is used in dock/undock scenarios.
5608  * Seamless DRRS involves changing RR without any visual effect to the user
5609  * and can be used during normal system usage. This is done by programming
5610  * certain registers.
5611  *
5612  * Support for static/seamless DRRS may be indicated in the VBT based on
5613  * inputs from the panel spec.
5614  *
5615  * DRRS saves power by switching to low RR based on usage scenarios.
5616  *
5617  * eDP DRRS:
5618  *        The implementation is based on the frontbuffer tracking code.
5619  * When there is a disturbance on the screen, triggered by user activity or
5620  * periodic system activity, DRRS is disabled (the RR is switched to the
5621  * high RR). When there is no movement on screen for a timeout of 1 second,
5622  * a switch to the low RR is made.
5623  *        For integration with frontbuffer tracking code,
5624  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5625  *
5626  * DRRS can be further extended to support other internal panels and also
5627  * the scenario of video playback wherein RR is set based on the rate
5628  * requested by userspace.
5629  */
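
/*
 * A minimal sketch of the integration described above (the actual call
 * sites live in the frontbuffer tracking code):
 *
 *	// rendering starts touching the frontbuffer: force the high RR
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	...
 *	// rendering/flip has completed: upclock, then downclock after 1s idle
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 */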
5630 
5631 /**
5632  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5633  * @intel_connector: eDP connector
5634  * @fixed_mode: preferred mode of panel
5635  *
5636  * This function is called only once, at driver load, to initialize basic
5637  * DRRS state.
5638  *
5639  * Returns:
5640  * The downclock mode if the panel supports it, else NULL.
5641  * DRRS support is determined by the presence of a downclock mode (apart
5642  * from the VBT setting).
5643  */
5644 static struct drm_display_mode *
5645 intel_dp_drrs_init(struct intel_connector *intel_connector,
5646 		struct drm_display_mode *fixed_mode)
5647 {
5648 	struct drm_connector *connector = &intel_connector->base;
5649 	struct drm_device *dev = connector->dev;
5650 	struct drm_i915_private *dev_priv = dev->dev_private;
5651 	struct drm_display_mode *downclock_mode = NULL;
5652 
5653 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5654 	lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5655 
5656 	if (INTEL_INFO(dev)->gen <= 6) {
5657 		DRM_DEBUG_KMS("DRRS is only supported on Gen7 and above\n");
5658 		return NULL;
5659 	}
5660 
5661 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5662 		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5663 		return NULL;
5664 	}
5665 
5666 	downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
5667 						    connector);
5668 
5669 	if (!downclock_mode) {
5670 		DRM_DEBUG_KMS("Downclock mode not found, DRRS not supported\n");
5671 		return NULL;
5672 	}
5673 
5674 	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5675 
5676 	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5677 	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5678 	return downclock_mode;
5679 }
5680 
5681 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5682 				     struct intel_connector *intel_connector)
5683 {
5684 	struct drm_connector *connector = &intel_connector->base;
5685 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5686 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5687 	struct drm_device *dev = intel_encoder->base.dev;
5688 	struct drm_i915_private *dev_priv = dev->dev_private;
5689 	struct drm_display_mode *fixed_mode = NULL;
5690 	struct drm_display_mode *downclock_mode = NULL;
5691 	bool has_dpcd;
5692 	struct drm_display_mode *scan;
5693 	struct edid *edid;
5694 	enum i915_pipe pipe = INVALID_PIPE;
5695 
5696 	if (!is_edp(intel_dp))
5697 		return true;
5698 
5699 	pps_lock(intel_dp);
5700 	intel_edp_panel_vdd_sanitize(intel_dp);
5701 	pps_unlock(intel_dp);
5702 
5703 	/* Cache DPCD and EDID for edp. */
5704 	has_dpcd = intel_dp_get_dpcd(intel_dp);
5705 
5706 	if (has_dpcd) {
5707 		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5708 			dev_priv->no_aux_handshake =
5709 				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5710 				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5711 	} else {
5712 		/* if this fails, presume the device is a ghost */
5713 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
5714 		return false;
5715 	}
5716 
5717 	/* We now know it's not a ghost, init power sequence regs. */
5718 	pps_lock(intel_dp);
5719 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5720 	pps_unlock(intel_dp);
5721 
5722 	mutex_lock(&dev->mode_config.mutex);
5723 	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5724 	if (edid) {
5725 		if (drm_add_edid_modes(connector, edid)) {
5726 			drm_mode_connector_update_edid_property(connector,
5727 								edid);
5728 			drm_edid_to_eld(connector, edid);
5729 		} else {
5730 			kfree(edid);
5731 			edid = ERR_PTR(-EINVAL);
5732 		}
5733 	} else {
5734 		edid = ERR_PTR(-ENOENT);
5735 	}
5736 	intel_connector->edid = edid;
5737 
5738 	/* prefer fixed mode from EDID if available */
5739 	list_for_each_entry(scan, &connector->probed_modes, head) {
5740 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5741 			fixed_mode = drm_mode_duplicate(dev, scan);
5742 			downclock_mode = intel_dp_drrs_init(
5743 						intel_connector, fixed_mode);
5744 			break;
5745 		}
5746 	}
5747 
5748 	/* fallback to VBT if available for eDP */
5749 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5750 		fixed_mode = drm_mode_duplicate(dev,
5751 					dev_priv->vbt.lfp_lvds_vbt_mode);
5752 		if (fixed_mode) {
5753 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5754 			connector->display_info.width_mm = fixed_mode->width_mm;
5755 			connector->display_info.height_mm = fixed_mode->height_mm;
5756 		}
5757 	}
5758 	mutex_unlock(&dev->mode_config.mutex);
5759 
5760 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5761 		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5762 #if 0
5763 		register_reboot_notifier(&intel_dp->edp_notifier);
5764 #endif
5765 
5766 		/*
5767 		 * Figure out the current pipe for the initial backlight setup.
5768 		 * If the current pipe isn't valid, try the PPS pipe, and if that
5769 		 * fails just assume pipe A.
5770 		 */
5771 		if (IS_CHERRYVIEW(dev))
5772 			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5773 		else
5774 			pipe = PORT_TO_PIPE(intel_dp->DP);
5775 
5776 		if (pipe != PIPE_A && pipe != PIPE_B)
5777 			pipe = intel_dp->pps_pipe;
5778 
5779 		if (pipe != PIPE_A && pipe != PIPE_B)
5780 			pipe = PIPE_A;
5781 
5782 		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5783 			      pipe_name(pipe));
5784 	}
5785 
5786 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5787 	intel_connector->panel.backlight.power = intel_edp_backlight_power;
5788 	intel_panel_setup_backlight(connector, pipe);
5789 
5790 	return true;
5791 }
5792 
5793 bool
5794 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5795 			struct intel_connector *intel_connector)
5796 {
5797 	struct drm_connector *connector = &intel_connector->base;
5798 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5799 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5800 	struct drm_device *dev = intel_encoder->base.dev;
5801 	struct drm_i915_private *dev_priv = dev->dev_private;
5802 	enum port port = intel_dig_port->port;
5803 	int type, ret;
5804 
5805 	if (WARN(intel_dig_port->max_lanes < 1,
5806 		 "Not enough lanes (%d) for DP on port %c\n",
5807 		 intel_dig_port->max_lanes, port_name(port)))
5808 		return false;
5809 
5810 	intel_dp->pps_pipe = INVALID_PIPE;
5811 
5812 	/* intel_dp vfuncs */
5813 	if (INTEL_INFO(dev)->gen >= 9)
5814 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5815 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5816 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5817 	else if (HAS_PCH_SPLIT(dev))
5818 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5819 	else
5820 		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
5821 
5822 	if (INTEL_INFO(dev)->gen >= 9)
5823 		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5824 	else
5825 		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
5826 
5827 	if (HAS_DDI(dev))
5828 		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5829 
5830 	/* Preserve the current hw state. */
5831 	intel_dp->DP = I915_READ(intel_dp->output_reg);
5832 	intel_dp->attached_connector = intel_connector;
5833 
5834 	if (intel_dp_is_edp(dev, port))
5835 		type = DRM_MODE_CONNECTOR_eDP;
5836 	else
5837 		type = DRM_MODE_CONNECTOR_DisplayPort;
5838 
5839 	/*
5840 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5841 	 * for DP the encoder type can be set by the caller to
5842 	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5843 	 */
5844 	if (type == DRM_MODE_CONNECTOR_eDP)
5845 		intel_encoder->type = INTEL_OUTPUT_EDP;
5846 
5847 	/* eDP only on port B and/or C on vlv/chv */
5848 	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5849 		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
5850 		return false;
5851 
5852 	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5853 			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5854 			port_name(port));
5855 
5856 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5857 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5858 
5859 	connector->interlace_allowed = true;
5860 	connector->doublescan_allowed = 0;
5861 
5862 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5863 			  edp_panel_vdd_work);
5864 
5865 	intel_connector_attach_encoder(intel_connector, intel_encoder);
5866 	drm_connector_register(connector);
5867 
5868 	if (HAS_DDI(dev))
5869 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5870 	else
5871 		intel_connector->get_hw_state = intel_connector_get_hw_state;
5872 	intel_connector->unregister = intel_dp_connector_unregister;
5873 
5874 	/* Set up the hotplug pin. */
5875 	switch (port) {
5876 	case PORT_A:
5877 		intel_encoder->hpd_pin = HPD_PORT_A;
5878 		break;
5879 	case PORT_B:
5880 		intel_encoder->hpd_pin = HPD_PORT_B;
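		/* BXT A0/A1 steppings have the port B HPD wired to the port A pin. */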
5881 		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
5882 			intel_encoder->hpd_pin = HPD_PORT_A;
5883 		break;
5884 	case PORT_C:
5885 		intel_encoder->hpd_pin = HPD_PORT_C;
5886 		break;
5887 	case PORT_D:
5888 		intel_encoder->hpd_pin = HPD_PORT_D;
5889 		break;
5890 	case PORT_E:
5891 		intel_encoder->hpd_pin = HPD_PORT_E;
5892 		break;
5893 	default:
5894 		BUG();
5895 	}
5896 
5897 	if (is_edp(intel_dp)) {
5898 		pps_lock(intel_dp);
5899 		intel_dp_init_panel_power_timestamps(intel_dp);
5900 		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5901 			vlv_initial_power_sequencer_setup(intel_dp);
5902 		else
5903 			intel_dp_init_panel_power_sequencer(dev, intel_dp);
5904 		pps_unlock(intel_dp);
5905 	}
5906 
5907 	ret = intel_dp_aux_init(intel_dp, intel_connector);
5908 	if (ret)
5909 		goto fail;
5910 
5911 	/* init MST on ports that can support it */
5912 	if (HAS_DP_MST(dev) &&
5913 	    (port == PORT_B || port == PORT_C || port == PORT_D))
5914 		intel_dp_mst_encoder_init(intel_dig_port,
5915 					  intel_connector->base.base.id);
5916 
5917 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5918 		intel_dp_aux_fini(intel_dp);
5919 		intel_dp_mst_encoder_cleanup(intel_dig_port);
5920 		goto fail;
5921 	}
5922 
5923 	intel_dp_add_properties(intel_dp, connector);
5924 
5925 	/* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
5926 	 * written with 0xd. Failure to do so will result in spurious interrupts
5927 	 * being generated on the port when a cable is not attached.
5928 	 */
5929 	if (IS_G4X(dev) && !IS_GM45(dev)) {
5930 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5931 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5932 	}
5933 
5934 	i915_debugfs_connector_add(connector);
5935 
5936 	return true;
5937 
5938 fail:
5939 	if (is_edp(intel_dp)) {
5940 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5941 		/*
5942 		 * vdd might still be enabled due to the delayed vdd off.
5943 		 * Make sure vdd is actually turned off here.
5944 		 */
5945 		pps_lock(intel_dp);
5946 		edp_panel_vdd_off_sync(intel_dp);
5947 		pps_unlock(intel_dp);
5948 	}
5949 	drm_connector_unregister(connector);
5950 	drm_connector_cleanup(connector);
5951 
5952 	return false;
5953 }
5954 
5955 bool intel_dp_init(struct drm_device *dev,
5956 		   i915_reg_t output_reg,
5957 		   enum port port)
5958 {
5959 	struct drm_i915_private *dev_priv = dev->dev_private;
5960 	struct intel_digital_port *intel_dig_port;
5961 	struct intel_encoder *intel_encoder;
5962 	struct drm_encoder *encoder;
5963 	struct intel_connector *intel_connector;
5964 
5965 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5966 	if (!intel_dig_port)
5967 		return false;
5968 
5969 	intel_connector = intel_connector_alloc();
5970 	if (!intel_connector)
5971 		goto err_connector_alloc;
5972 
5973 	intel_encoder = &intel_dig_port->base;
5974 	encoder = &intel_encoder->base;
5975 
5976 	if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5977 			     DRM_MODE_ENCODER_TMDS, NULL))
5978 		goto err_encoder_init;
5979 
5980 	intel_encoder->compute_config = intel_dp_compute_config;
5981 	intel_encoder->disable = intel_disable_dp;
5982 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
5983 	intel_encoder->get_config = intel_dp_get_config;
5984 	intel_encoder->suspend = intel_dp_encoder_suspend;
5985 	if (IS_CHERRYVIEW(dev)) {
5986 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5987 		intel_encoder->pre_enable = chv_pre_enable_dp;
5988 		intel_encoder->enable = vlv_enable_dp;
5989 		intel_encoder->post_disable = chv_post_disable_dp;
5990 		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
5991 	} else if (IS_VALLEYVIEW(dev)) {
5992 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5993 		intel_encoder->pre_enable = vlv_pre_enable_dp;
5994 		intel_encoder->enable = vlv_enable_dp;
5995 		intel_encoder->post_disable = vlv_post_disable_dp;
5996 	} else {
5997 		intel_encoder->pre_enable = g4x_pre_enable_dp;
5998 		intel_encoder->enable = g4x_enable_dp;
5999 		if (INTEL_INFO(dev)->gen >= 5)
6000 			intel_encoder->post_disable = ilk_post_disable_dp;
6001 	}
6002 
6003 	intel_dig_port->port = port;
6004 	intel_dig_port->dp.output_reg = output_reg;
6005 	intel_dig_port->max_lanes = 4;
6006 
6007 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
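	/* crtc_mask is a bitmask of pipes this encoder may be driven by; on
	 * CHV, port D can only use pipe C. */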
6008 	if (IS_CHERRYVIEW(dev)) {
6009 		if (port == PORT_D)
6010 			intel_encoder->crtc_mask = 1 << 2;
6011 		else
6012 			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6013 	} else {
6014 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6015 	}
6016 	intel_encoder->cloneable = 0;
6017 
6018 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6019 	dev_priv->hotplug.irq_port[port] = intel_dig_port;
6020 
6021 	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6022 		goto err_init_connector;
6023 
6024 	return true;
6025 
6026 err_init_connector:
6027 	drm_encoder_cleanup(encoder);
6028 err_encoder_init:
6029 	kfree(intel_connector);
6030 err_connector_alloc:
6031 	kfree(intel_dig_port);
6032 	return false;
6033 }
6034 
6035 #if 0
6036 void intel_dp_mst_suspend(struct drm_device *dev)
6037 {
6038 	struct drm_i915_private *dev_priv = dev->dev_private;
6039 	int i;
6040 
6041 	/* disable MST */
6042 	for (i = 0; i < I915_MAX_PORTS; i++) {
6043 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6044 		if (!intel_dig_port)
6045 			continue;
6046 
6047 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6048 			if (!intel_dig_port->dp.can_mst)
6049 				continue;
6050 			if (intel_dig_port->dp.is_mst)
6051 				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6052 		}
6053 	}
6054 }
6055 #endif
6056 
6057 void intel_dp_mst_resume(struct drm_device *dev)
6058 {
6059 	struct drm_i915_private *dev_priv = dev->dev_private;
6060 	int i;
6061 
6062 	for (i = 0; i < I915_MAX_PORTS; i++) {
6063 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6064 		if (!intel_dig_port)
6065 			continue;
6066 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6067 			int ret;
6068 
6069 			if (!intel_dig_port->dp.can_mst)
6070 				continue;
6071 
6072 			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6073 			if (ret != 0) {
6074 				intel_dp_check_mst_status(&intel_dig_port->dp);
6075 			}
6076 		}
6077 	}
6078 }
6079