/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <linux/slab.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows additional link rates.
 * Only the fixed rates are provided below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2; m2 is
	 * stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 * e.g. 0x819999a == (32 << 22) | 1677722, i.e. m2 ~= 32.4
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);

int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
	struct drm_device *dev = intel_dp->attached_connector->base.dev;

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
		if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
		     INTEL_INFO(dev)->gen >= 8) &&
		    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
			max_link_bw = DP_LINK_BW_5_4;
		else
			max_link_bw = DP_LINK_BW_2_7;
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
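
/*
 * For example, continuing the 1680x1050R case from the comment above:
 * intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200
 * decakilobits/s, which fits within the 216000 that a single 2.7GHz lane
 * provides per intel_dp_max_data_rate(270000, 1).
 */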

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int	i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
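
/*
 * pack_aux() and unpack_aux() are inverses for up to four bytes, with
 * src[0] landing in the most significant byte of the register word.
 * For example:
 *
 *     uint8_t buf[3] = { 0x12, 0x34, 0x56 };
 *     pack_aux(buf, 3) == 0x12345600
 *     unpack_aux(0x12345600, buf, 3) restores { 0x12, 0x34, 0x56 }
 */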

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *out);

static enum i915_pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	enum i915_pipe pipe;

	/* modeset should have pipe */
	if (crtc)
		return to_intel_crtc(crtc)->pipe;

	/* init time, try to find a pipe with this port selected */
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;
		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
			return pipe;
		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
			return pipe;
	}

	/* shrug */
	return PIPE_A;
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
   This function is only applicable when the panel PM state is not being tracked. */
#if 0
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;
	enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	if (IS_VALLEYVIEW(dev)) {
		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	return 0;
}
#endif

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;

	power_domain = intel_display_port_power_domain(intel_encoder);
	return intel_display_power_enabled(dev_priv, power_domain) &&
	       (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400MHz */
		else
			return 225; /* eDP input clock at 450MHz */
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}
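
/*
 * The get_aux_clock_divider() hooks above are iterated by index from 0
 * until they return 0: intel_dp_aux_ch() below retries the transfer once
 * per non-zero divider, so platforms with a single valid divider (e.g.
 * i9xx, VLV) return it at index 0 and 0 afterwards, while non-ULT HSW
 * exposes a second, workaround divider at index 1.
 */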

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);

	/* DP AUX is extremely sensitive to IRQ latency, hence request the
	 * lowest possible wakeup latency to prevent the CPU from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* The DP spec requires at least 3 tries; this loop allows up to 5 */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   pack_aux(send + i, send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	return ret;
}
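
/*
 * Note the return convention of intel_dp_aux_ch(): on success it returns
 * the number of bytes received, otherwise a negative errno (-EBUSY,
 * -E2BIG, -EIO or -ETIMEDOUT as above). The native read/write helpers
 * below layer the DP AUX retry rules on top of it.
 */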

/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t	msg[20];
	int msg_bytes;
	uint8_t	ack;
	int retry;

	if (WARN_ON(send_bytes > 16))
		return -E2BIG;

	intel_dp_check_edp(intel_dp);
	msg[0] = DP_AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		ack >>= 4;
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
			return send_bytes;
		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
			usleep_range(400, 500);
		else
			return -EIO;
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EIO;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];
	int reply_bytes;
	uint8_t ack;
	int ret;
	int retry;

	if (WARN_ON(recv_bytes > 19))
		return -E2BIG;

	intel_dp_check_edp(intel_dp);
	msg[0] = DP_AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0] >> 4;
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
			usleep_range(400, 500);
		else
			return -EIO;
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EIO;
}
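
/*
 * A typical use is a one-byte DPCD register read, e.g. fetching the
 * sink's DPCD revision (a sketch only, not a verbatim call from this
 * file):
 *
 *     uint8_t rev;
 *     if (intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, &rev, 1) == 1)
 *         DRM_DEBUG_KMS("DPCD rev 0x%02x\n", rev);
 */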

static int
intel_dp_i2c_aux_ch(struct device *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *data = device_get_softc(adapter);
	struct intel_dp *intel_dp = data->priv;
	uint16_t address = data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_edp_panel_vdd_on(intel_dp);
	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = DP_AUX_I2C_READ << 4;
	else
		msg[0] = DP_AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[0] |= DP_AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	/*
	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
	 * required to retry at least seven times upon receiving AUX_DEFER
	 * before giving up the AUX transaction.
	 */
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			goto out;
		}

		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
		case DP_AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case DP_AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_NATIVE_REPLY_DEFER:
			/*
			 * For now, just give more slack to branch devices. We
			 * could check the DPCD for I2C bit rate capabilities,
			 * and if available, adjust the interval. We could also
			 * be more careful with DP-to-Legacy adapters where a
			 * long legacy cable may force very low I2C bit rates.
			 */
			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			    DP_DWN_STRM_PORT_PRESENT)
				usleep_range(500, 600);
			else
				usleep_range(300, 400);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}

		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
		case DP_AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			ret = 0;	/* reply_bytes - 1 */
			goto out;
		case DP_AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	ret = -EREMOTEIO;

out:
	return ret;
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	intel_connector_unregister(intel_connector);
}

static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int	ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
#if 0
	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = intel_connector->base.dev->dev;

	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	if (ret < 0)
		return ret;

	ret = sysfs_create_link(&intel_connector->base.kdev->kobj,
				&intel_dp->adapter.dev.kobj,
				intel_dp->adapter.dev.kobj.name);
#endif
	ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
	    &intel_dp->adapter);

	return ret;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
{
	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_config *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void
intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder transcoder = crtc->config.cpu_transcoder;

	I915_WRITE(PIPE_DATA_M2(transcoder),
		TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
	I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
	I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
	int bpp, mode_rate;
	static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
	int link_avail, link_clock;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_audio = intel_dp->has_audio;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		if (IS_BROADWELL(dev)) {
			/* Yes, it's an ugly hack. */
			min_lane_count = max_lane_count;
			DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
				      min_lane_count);
		} else if (dev_priv->vbt.edp_lanes) {
			min_lane_count = min(dev_priv->vbt.edp_lanes,
					     max_lane_count);
			DRM_DEBUG_KMS("using min %u lanes per VBT\n",
				      min_lane_count);
		}

		if (dev_priv->vbt.edp_rate) {
			min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
			DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
				      bws[min_clock]);
		}
	}
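
	/*
	 * The search below tries the highest bpp first, then walks link
	 * rates from slow to fast and lane counts from few to many. For
	 * example, assuming the default minimums with 4 lanes and a 2.7GHz
	 * max link rate available, a 148500 kHz mode at 24bpp needs
	 * intel_dp_link_required(148500, 24) == 356400 decakilobits/s,
	 * which first fits at DP_LINK_BW_1_62 x 4 lanes (518400 available),
	 * so the lower link rate wins over a leaner lane count.
	 */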
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
		intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	if (HAS_DDI(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config.port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config.has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(&encoder->base, adjusted_mode);
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}

#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

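/*
 * Each MASK/VALUE pair above describes the PP_STATUS bits that identify
 * one target state of the panel power sequencer; wait_panel_status()
 * below polls the status register until (status & mask) == value.
 */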
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP VDD on\n");

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
		struct intel_digital_port *intel_dig_port =
						dp_to_dig_port(intel_dp);
		struct intel_encoder *intel_encoder = &intel_dig_port->base;
		enum intel_display_power_domain power_domain;

		DRM_DEBUG_KMS("Turning eDP VDD off\n");

		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		pp_stat_reg = _pp_stat_reg(intel_dp);

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

		if ((pp & POWER_TARGET_ON) == 0)
			intel_dp->last_power_cycle = jiffies;

		power_domain = intel_display_port_power_domain(intel_encoder);
		intel_display_power_put(dev_priv, power_domain);
	}
}

static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	edp_panel_vdd_off_sync(intel_dp);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}

static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}

void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}

void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}

void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	intel_panel_enable_backlight(intel_dp->attached_connector);

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
}

void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	intel_dp->last_backlight_off = jiffies;

	edp_wait_backlight_off(intel_dp);

	intel_panel_disable_backlight(intel_dp->attached_connector);
}

static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail). */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum i915_pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}

static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);
	if (tmp & DP_AUDIO_OUTPUT_ENABLE)
		pipe_config->has_audio = true;

	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}

static bool is_edp_psr(struct intel_dp *intel_dp)
{
	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}

static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PSR(dev))
		return false;

	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}

static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
				    struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

1808 	/* As per BSpec (Pipe Video Data Island Packet), the video DIP must be
1809 	   disabled before the video DIP data buffer registers are programmed
1810 	   with the updated packet contents. */
1811 	I915_WRITE(ctl_reg, 0);
1812 	POSTING_READ(ctl_reg);
1813 
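	/*
	 * Copy the VSC SDP into the DIP data buffer one dword at a time,
	 * zero-filling whatever remains of the buffer past the packet.
	 */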
1814 	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
1815 		if (i < sizeof(struct edp_vsc_psr))
1816 			I915_WRITE(data_reg + i, *data++);
1817 		else
1818 			I915_WRITE(data_reg + i, 0);
1819 	}
1820 
1821 	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
1822 	POSTING_READ(ctl_reg);
1823 }
1824 
1825 static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1826 {
1827 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1828 	struct drm_i915_private *dev_priv = dev->dev_private;
1829 	struct edp_vsc_psr psr_vsc;
1830 
1831 	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
1832 	memset(&psr_vsc, 0, sizeof(psr_vsc));
1833 	psr_vsc.sdp_header.HB0 = 0;	/* secondary data packet ID */
1834 	psr_vsc.sdp_header.HB1 = 0x7;	/* SDP type: VSC */
1835 	psr_vsc.sdp_header.HB2 = 0x2;	/* revision: VSC with PSR support */
1836 	psr_vsc.sdp_header.HB3 = 0x8;	/* 8 valid data bytes */
1837 	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
1838 
1839 	/* Avoid continuous PSR exit by masking memup and hpd */
1840 	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
1841 		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
1842 }
1843 
1844 static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1845 {
1846 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1847 	struct drm_device *dev = dig_port->base.base.dev;
1848 	struct drm_i915_private *dev_priv = dev->dev_private;
1849 	uint32_t aux_clock_divider;
1850 	int precharge = 0x3;
1851 	int msg_size = 5;       /* Header(4) + Message(1) */
1852 	bool only_standby = false;
1853 
1854 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
1855 
1856 	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
1857 		only_standby = true;
1858 
1859 	/* Enable PSR in sink */
1860 	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
1861 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
1862 				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
1863 	else
1864 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
1865 				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
1866 
1867 	/* Setup AUX registers */
1868 	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
1869 	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
1870 	I915_WRITE(EDP_PSR_AUX_CTL(dev),
1871 		   DP_AUX_CH_CTL_TIME_OUT_400us |
1872 		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1873 		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1874 		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
1875 }
1876 
1877 static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1878 {
1879 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1880 	struct drm_device *dev = dig_port->base.base.dev;
1881 	struct drm_i915_private *dev_priv = dev->dev_private;
1882 	uint32_t max_sleep_time = 0x1f;
1883 	uint32_t idle_frames = 1;
1884 	uint32_t val = 0x0;
1885 	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
1886 	bool only_standby = false;
1887 
1888 	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
1889 		only_standby = true;
1890 
1891 	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
1892 		val |= EDP_PSR_LINK_STANDBY;
1893 		val |= EDP_PSR_TP2_TP3_TIME_0us;
1894 		val |= EDP_PSR_TP1_TIME_0us;
1895 		val |= EDP_PSR_SKIP_AUX_EXIT;
1896 		val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
1897 	} else
1898 		val |= EDP_PSR_LINK_DISABLE;
1899 
1900 	I915_WRITE(EDP_PSR_CTL(dev), val |
1901 		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
1902 		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1903 		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1904 		   EDP_PSR_ENABLE);
1905 }
1906 
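/*
 * Check whether the current configuration allows PSR. The constraints
 * enforced below: Haswell supports PSR only on DDI A (eDP), the
 * i915.enable_psr module parameter must be set, and (pre-Broadwell)
 * neither stereo 3D nor interlaced modes may be active.
 */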
1907 static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1908 {
1909 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1910 	struct drm_device *dev = dig_port->base.base.dev;
1911 	struct drm_i915_private *dev_priv = dev->dev_private;
1912 	struct drm_crtc *crtc = dig_port->base.base.crtc;
1913 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1914 
1915 #if 0
1916 	lockdep_assert_held(&dev_priv->psr.lock);
1917 #endif
1918 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
1919 	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
1920 
1921 	dev_priv->psr.source_ok = false;
1922 
1923 	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
1924 		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1925 		return false;
1926 	}
1927 
1928 	if (!i915.enable_psr) {
1929 		DRM_DEBUG_KMS("PSR disable by flag\n");
1930 		return false;
1931 	}
1932 
1933 	/* The limitations below do not apply to Broadwell */
1934 	if (IS_BROADWELL(dev))
1935 		goto out;
1936 
1937 	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1938 	    S3D_ENABLE) {
1939 		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1940 		return false;
1941 	}
1942 
1943 	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1944 		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1945 		return false;
1946 	}
1947 
1948  out:
1949 	dev_priv->psr.source_ok = true;
1950 	return true;
1951 }
1952 
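/*
 * Actually enter PSR: the sink (panel) is armed first via DPCD so it is
 * ready when the source side subsequently starts entering PSR.
 */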
1953 static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
1954 {
1955 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1956 	struct drm_device *dev = intel_dig_port->base.base.dev;
1957 	struct drm_i915_private *dev_priv = dev->dev_private;
1958 
1959 	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
1960 	WARN_ON(dev_priv->psr.active);
1961 #if 0
1962 	lockdep_assert_held(&dev_priv->psr.lock);
1963 #endif
1964 
1965 	/* Enable PSR on the panel */
1966 	intel_edp_psr_enable_sink(intel_dp);
1967 
1968 	/* Enable PSR on the host */
1969 	intel_edp_psr_enable_source(intel_dp);
1970 
1971 	dev_priv->psr.active = true;
1972 }
1973 
1974 void intel_edp_psr_enable(struct intel_dp *intel_dp)
1975 {
1976 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1977 	struct drm_i915_private *dev_priv = dev->dev_private;
1978 
1979 	if (!HAS_PSR(dev)) {
1980 		DRM_DEBUG_KMS("PSR not supported on this platform\n");
1981 		return;
1982 	}
1983 
1984 	if (!is_edp_psr(intel_dp)) {
1985 		DRM_DEBUG_KMS("PSR not supported by this panel\n");
1986 		return;
1987 	}
1988 
1989 	mutex_lock(&dev_priv->psr.lock);
1990 	if (dev_priv->psr.enabled) {
1991 		DRM_DEBUG_KMS("PSR already in use\n");
1992 		mutex_unlock(&dev_priv->psr.lock);
1993 		return;
1994 	}
1995 
1996 	dev_priv->psr.busy_frontbuffer_bits = 0;
1997 
1998 	/* Setup PSR once */
1999 	intel_edp_psr_setup(intel_dp);
2000 
2001 	if (intel_edp_psr_match_conditions(intel_dp))
2002 		dev_priv->psr.enabled = intel_dp;
2003 	mutex_unlock(&dev_priv->psr.lock);
2004 }
2005 
2006 void intel_edp_psr_disable(struct intel_dp *intel_dp)
2007 {
2008 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2009 	struct drm_i915_private *dev_priv = dev->dev_private;
2010 
2011 	mutex_lock(&dev_priv->psr.lock);
2012 	if (!dev_priv->psr.enabled) {
2013 		mutex_unlock(&dev_priv->psr.lock);
2014 		return;
2015 	}
2016 
2017 	if (dev_priv->psr.active) {
2018 		I915_WRITE(EDP_PSR_CTL(dev),
2019 			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
2020 
2021 		/* Wait till PSR is idle */
2022 		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
2023 			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
2024 			DRM_ERROR("Timed out waiting for PSR Idle State\n");
2025 
2026 		dev_priv->psr.active = false;
2027 	} else {
2028 		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
2029 	}
2030 
2031 	dev_priv->psr.enabled = NULL;
2032 	mutex_unlock(&dev_priv->psr.lock);
2033 
2034 	cancel_delayed_work_sync(&dev_priv->psr.work);
2035 }
2036 
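/*
 * Delayed work that re-enables PSR once frontbuffer activity has settled.
 * psr.enabled and the busy frontbuffer bits are re-checked under the lock
 * since this work can race with intel_edp_psr_invalidate()/_flush().
 */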
2037 static void intel_edp_psr_work(struct work_struct *work)
2038 {
2039 	struct drm_i915_private *dev_priv =
2040 		container_of(work, typeof(*dev_priv), psr.work.work);
2041 	struct intel_dp *intel_dp;
2042 
2043 	mutex_lock(&dev_priv->psr.lock);
2044 	intel_dp = dev_priv->psr.enabled;
2045 
2046 	if (!intel_dp)
2047 		goto unlock;
2048 
2049 	/*
2050 	 * The delayed work can race with an invalidate, hence we need to
2051 	 * recheck. Since psr_flush first clears this and then reschedules we
2052 	 * won't ever miss a flush when bailing out here.
2053 	 */
2054 	if (dev_priv->psr.busy_frontbuffer_bits)
2055 		goto unlock;
2056 
2057 	intel_edp_psr_do_enable(intel_dp);
2058 unlock:
2059 	mutex_unlock(&dev_priv->psr.lock);
2060 }
2061 
2062 static void intel_edp_psr_do_exit(struct drm_device *dev)
2063 {
2064 	struct drm_i915_private *dev_priv = dev->dev_private;
2065 
2066 	if (dev_priv->psr.active) {
2067 		u32 val = I915_READ(EDP_PSR_CTL(dev));
2068 
2069 		WARN_ON(!(val & EDP_PSR_ENABLE));
2070 
2071 		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
2072 
2073 		dev_priv->psr.active = false;
2074 	}
2076 }
2077 
2078 void intel_edp_psr_invalidate(struct drm_device *dev,
2079 			      unsigned frontbuffer_bits)
2080 {
2081 	struct drm_i915_private *dev_priv = dev->dev_private;
2082 	struct drm_crtc *crtc;
2083 	enum i915_pipe pipe;
2084 
2085 	mutex_lock(&dev_priv->psr.lock);
2086 	if (!dev_priv->psr.enabled) {
2087 		mutex_unlock(&dev_priv->psr.lock);
2088 		return;
2089 	}
2090 
2091 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
2092 	pipe = to_intel_crtc(crtc)->pipe;
2093 
2094 	intel_edp_psr_do_exit(dev);
2095 
2096 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
2097 
2098 	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
2099 	mutex_unlock(&dev_priv->psr.lock);
2100 }
2101 
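/*
 * Flush is the counterpart to invalidate: it clears the busy frontbuffer
 * bits and, once nothing is busy and PSR is inactive, schedules the
 * delayed work above to re-enter PSR.
 */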
2102 void intel_edp_psr_flush(struct drm_device *dev,
2103 			 unsigned frontbuffer_bits)
2104 {
2105 	struct drm_i915_private *dev_priv = dev->dev_private;
2106 	struct drm_crtc *crtc;
2107 	enum i915_pipe pipe;
2108 
2109 	mutex_lock(&dev_priv->psr.lock);
2110 	if (!dev_priv->psr.enabled) {
2111 		mutex_unlock(&dev_priv->psr.lock);
2112 		return;
2113 	}
2114 
2115 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
2116 	pipe = to_intel_crtc(crtc)->pipe;
2117 	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
2118 
2119 	/*
2120 	 * On Haswell, sprite plane updates don't result in a PSR invalidating
2121 	 * signal in the hardware, which means we need to fake this manually in
2122 	 * software for all flushes, not just when we've seen a preceding
2123 	 * invalidation through frontbuffer rendering.
2124 	 */
2125 	if (IS_HASWELL(dev) &&
2126 	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
2127 		intel_edp_psr_do_exit(dev);
2128 
2129 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
2130 		schedule_delayed_work(&dev_priv->psr.work,
2131 				      msecs_to_jiffies(100));
2132 	mutex_unlock(&dev_priv->psr.lock);
2133 }
2134 
2135 void intel_edp_psr_init(struct drm_device *dev)
2136 {
2137 	struct drm_i915_private *dev_priv = dev->dev_private;
2138 
2139 	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
2140 	lockinit(&dev_priv->psr.lock, "i915dpl", 0, LK_CANRECURSE);
2141 }
2142 
2143 static void intel_disable_dp(struct intel_encoder *encoder)
2144 {
2145 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2146 	enum port port = dp_to_dig_port(intel_dp)->port;
2147 	struct drm_device *dev = encoder->base.dev;
2148 
2149 	/* Make sure the panel is off before trying to change the mode, but
2150 	 * also ensure that we have VDD while we switch off the panel. */
2151 	intel_edp_panel_vdd_on(intel_dp);
2152 	intel_edp_backlight_off(intel_dp);
2153 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2154 	intel_edp_panel_off(intel_dp);
2155 
2156 	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
2157 	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
2158 		intel_dp_link_down(intel_dp);
2159 }
2160 
2161 static void g4x_post_disable_dp(struct intel_encoder *encoder)
2162 {
2163 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2164 	enum port port = dp_to_dig_port(intel_dp)->port;
2165 
2166 	if (port != PORT_A)
2167 		return;
2168 
2169 	intel_dp_link_down(intel_dp);
2170 	ironlake_edp_pll_off(intel_dp);
2171 }
2172 
2173 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2174 {
2175 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2176 
2177 	intel_dp_link_down(intel_dp);
2178 }
2179 
2180 static void chv_post_disable_dp(struct intel_encoder *encoder)
2181 {
2182 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2183 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2184 	struct drm_device *dev = encoder->base.dev;
2185 	struct drm_i915_private *dev_priv = dev->dev_private;
2186 	struct intel_crtc *intel_crtc =
2187 		to_intel_crtc(encoder->base.crtc);
2188 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2189 	enum i915_pipe pipe = intel_crtc->pipe;
2190 	u32 val;
2191 
2192 	intel_dp_link_down(intel_dp);
2193 
2194 	mutex_lock(&dev_priv->dpio_lock);
2195 
2196 	/* Propagate soft reset to data lane reset */
2197 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2198 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2199 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2200 
2201 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2202 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2203 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2204 
2205 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2206 	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2207 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2208 
2209 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2210 	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2211 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2212 
2213 	mutex_unlock(&dev_priv->dpio_lock);
2214 }
2215 
2216 static void intel_enable_dp(struct intel_encoder *encoder)
2217 {
2218 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2219 	struct drm_device *dev = encoder->base.dev;
2220 	struct drm_i915_private *dev_priv = dev->dev_private;
2221 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2222 
2223 	if (WARN_ON(dp_reg & DP_PORT_EN))
2224 		return;
2225 
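	/*
	 * Bring-up order matters: force panel VDD on so AUX/DPCD accesses
	 * work, wake the sink, start link training, then power the panel on
	 * fully before dropping the VDD override and completing training.
	 */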
2226 	intel_edp_panel_vdd_on(intel_dp);
2227 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2228 	intel_dp_start_link_train(intel_dp);
2229 	intel_edp_panel_on(intel_dp);
2230 	edp_panel_vdd_off(intel_dp, true);
2231 	intel_dp_complete_link_train(intel_dp);
2232 	intel_dp_stop_link_train(intel_dp);
2233 }
2234 
2235 static void g4x_enable_dp(struct intel_encoder *encoder)
2236 {
2237 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2238 
2239 	intel_enable_dp(encoder);
2240 	intel_edp_backlight_on(intel_dp);
2241 }
2242 
2243 static void vlv_enable_dp(struct intel_encoder *encoder)
2244 {
2245 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2246 
2247 	intel_edp_backlight_on(intel_dp);
2248 }
2249 
2250 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2251 {
2252 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2253 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2254 
2255 	intel_dp_prepare(encoder);
2256 
2257 	/* Only ilk+ has port A */
2258 	if (dport->port == PORT_A) {
2259 		ironlake_set_pll_cpu_edp(intel_dp);
2260 		ironlake_edp_pll_on(intel_dp);
2261 	}
2262 }
2263 
2264 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2265 {
2266 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2267 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2268 	struct drm_device *dev = encoder->base.dev;
2269 	struct drm_i915_private *dev_priv = dev->dev_private;
2270 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2271 	enum dpio_channel port = vlv_dport_to_channel(dport);
2272 	int pipe = intel_crtc->pipe;
2273 	struct edp_power_seq power_seq;
2274 	u32 val;
2275 
2276 	mutex_lock(&dev_priv->dpio_lock);
2277 
2278 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2279 	val = 0;
2280 	if (pipe)
2281 		val |= (1<<21);
2282 	else
2283 		val &= ~(1<<21);
2284 	val |= 0x001000c4;
2285 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2286 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2287 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2288 
2289 	mutex_unlock(&dev_priv->dpio_lock);
2290 
2291 	if (is_edp(intel_dp)) {
2292 		/* init power sequencer on this pipe and port */
2293 		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2294 		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2295 							      &power_seq);
2296 	}
2297 
2298 	intel_enable_dp(encoder);
2299 
2300 	vlv_wait_port_ready(dev_priv, dport);
2301 }
2302 
2303 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2304 {
2305 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2306 	struct drm_device *dev = encoder->base.dev;
2307 	struct drm_i915_private *dev_priv = dev->dev_private;
2308 	struct intel_crtc *intel_crtc =
2309 		to_intel_crtc(encoder->base.crtc);
2310 	enum dpio_channel port = vlv_dport_to_channel(dport);
2311 	int pipe = intel_crtc->pipe;
2312 
2313 	intel_dp_prepare(encoder);
2314 
2315 	/* Program Tx lane resets to default */
2316 	mutex_lock(&dev_priv->dpio_lock);
2317 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2318 			 DPIO_PCS_TX_LANE2_RESET |
2319 			 DPIO_PCS_TX_LANE1_RESET);
2320 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2321 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2322 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2323 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2324 				 DPIO_PCS_CLK_SOFT_RESET);
2325 
2326 	/* Fix up inter-pair skew failure */
2327 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2328 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2329 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2330 	mutex_unlock(&dev_priv->dpio_lock);
2331 }
2332 
2333 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2334 {
2335 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2336 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2337 	struct drm_device *dev = encoder->base.dev;
2338 	struct drm_i915_private *dev_priv = dev->dev_private;
2339 	struct edp_power_seq power_seq;
2340 	struct intel_crtc *intel_crtc =
2341 		to_intel_crtc(encoder->base.crtc);
2342 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2343 	int pipe = intel_crtc->pipe;
2344 	int data, i;
2345 	u32 val;
2346 
2347 	mutex_lock(&dev_priv->dpio_lock);
2348 
2349 	/* Deassert soft data lane reset */
2350 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2351 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2352 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2353 
2354 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2355 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2356 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2357 
2358 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2359 	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2360 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2361 
2362 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2363 	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2364 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2365 
2366 	/* Program Tx lane latency optimal setting */
2367 	for (i = 0; i < 4; i++) {
2368 		/* Set the latency optimal bit */
2369 		data = (i == 1) ? 0x0 : 0x6;
2370 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2371 				data << DPIO_FRC_LATENCY_SHFIT);
2372 
2373 		/* Set the upar bit */
2374 		data = (i == 1) ? 0x0 : 0x1;
2375 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2376 				data << DPIO_UPAR_SHIFT);
2377 	}
2378 
2379 	/* Data lane stagger programming */
2380 	/* FIXME: Fix up value only after power analysis */
2381 
2382 	mutex_unlock(&dev_priv->dpio_lock);
2383 
2384 	if (is_edp(intel_dp)) {
2385 		/* init power sequencer on this pipe and port */
2386 		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2387 		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2388 							      &power_seq);
2389 	}
2390 
2391 	intel_enable_dp(encoder);
2392 
2393 	vlv_wait_port_ready(dev_priv, dport);
2394 }
2395 
2396 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2397 {
2398 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2399 	struct drm_device *dev = encoder->base.dev;
2400 	struct drm_i915_private *dev_priv = dev->dev_private;
2401 	struct intel_crtc *intel_crtc =
2402 		to_intel_crtc(encoder->base.crtc);
2403 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2404 	enum i915_pipe pipe = intel_crtc->pipe;
2405 	u32 val;
2406 
2407 	mutex_lock(&dev_priv->dpio_lock);
2408 
2409 	/* program left/right clock distribution */
2410 	if (pipe != PIPE_B) {
2411 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2412 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2413 		if (ch == DPIO_CH0)
2414 			val |= CHV_BUFLEFTENA1_FORCE;
2415 		if (ch == DPIO_CH1)
2416 			val |= CHV_BUFRIGHTENA1_FORCE;
2417 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2418 	} else {
2419 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2420 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2421 		if (ch == DPIO_CH0)
2422 			val |= CHV_BUFLEFTENA2_FORCE;
2423 		if (ch == DPIO_CH1)
2424 			val |= CHV_BUFRIGHTENA2_FORCE;
2425 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2426 	}
2427 
2428 	/* program clock channel usage */
2429 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2430 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2431 	if (pipe != PIPE_B)
2432 		val &= ~CHV_PCS_USEDCLKCHANNEL;
2433 	else
2434 		val |= CHV_PCS_USEDCLKCHANNEL;
2435 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2436 
2437 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2438 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2439 	if (pipe != PIPE_B)
2440 		val &= ~CHV_PCS_USEDCLKCHANNEL;
2441 	else
2442 		val |= CHV_PCS_USEDCLKCHANNEL;
2443 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2444 
2445 	/*
2446 	 * This is a bit weird since generally CL
2447 	 * matches the pipe, but here we need to
2448 	 * pick the CL based on the port.
2449 	 */
2450 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2451 	if (pipe != PIPE_B)
2452 		val &= ~CHV_CMN_USEDCLKCHANNEL;
2453 	else
2454 		val |= CHV_CMN_USEDCLKCHANNEL;
2455 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2456 
2457 	mutex_unlock(&dev_priv->dpio_lock);
2458 }
2459 
2460 /*
2461  * Native read with retry for link status and receiver capability reads for
2462  * cases where the sink may still be asleep.
2463  */
2464 static bool
2465 intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
2466 			       uint8_t *recv, int recv_bytes)
2467 {
2468 	int ret, i;
2469 
2470 	/*
2471 	 * Sinks are *supposed* to come up within 1ms from an off state,
2472 	 * but we're also supposed to retry 3 times per the spec.
2473 	 */
2474 	for (i = 0; i < 3; i++) {
2475 		ret = intel_dp_aux_native_read(intel_dp, address, recv,
2476 					       recv_bytes);
2477 		if (ret == recv_bytes)
2478 			return true;
2479 		msleep(1);
2480 	}
2481 
2482 	return false;
2483 }
2484 
2485 /*
2486  * Fetch AUX CH registers 0x202 - 0x207 which contain
2487  * link status information
2488  */
2489 static bool
2490 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2491 {
2492 	return intel_dp_aux_native_read_retry(intel_dp,
2493 					      DP_LANE0_1_STATUS,
2494 					      link_status,
2495 					      DP_LINK_STATUS_SIZE);
2496 }
2497 
2498 /* These are source-specific maxima; sink adjustment requests are clamped to them. */
2499 static uint8_t
2500 intel_dp_voltage_max(struct intel_dp *intel_dp)
2501 {
2502 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2503 	enum port port = dp_to_dig_port(intel_dp)->port;
2504 
2505 	if (IS_VALLEYVIEW(dev))
2506 		return DP_TRAIN_VOLTAGE_SWING_1200;
2507 	else if (IS_GEN7(dev) && port == PORT_A)
2508 		return DP_TRAIN_VOLTAGE_SWING_800;
2509 	else if (HAS_PCH_CPT(dev) && port != PORT_A)
2510 		return DP_TRAIN_VOLTAGE_SWING_1200;
2511 	else
2512 		return DP_TRAIN_VOLTAGE_SWING_800;
2513 }
2514 
2515 static uint8_t
2516 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2517 {
2518 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2519 	enum port port = dp_to_dig_port(intel_dp)->port;
2520 
2521 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2522 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2523 		case DP_TRAIN_VOLTAGE_SWING_400:
2524 			return DP_TRAIN_PRE_EMPHASIS_9_5;
2525 		case DP_TRAIN_VOLTAGE_SWING_600:
2526 			return DP_TRAIN_PRE_EMPHASIS_6;
2527 		case DP_TRAIN_VOLTAGE_SWING_800:
2528 			return DP_TRAIN_PRE_EMPHASIS_3_5;
2529 		case DP_TRAIN_VOLTAGE_SWING_1200:
2530 		default:
2531 			return DP_TRAIN_PRE_EMPHASIS_0;
2532 		}
2533 	} else if (IS_VALLEYVIEW(dev)) {
2534 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2535 		case DP_TRAIN_VOLTAGE_SWING_400:
2536 			return DP_TRAIN_PRE_EMPHASIS_9_5;
2537 		case DP_TRAIN_VOLTAGE_SWING_600:
2538 			return DP_TRAIN_PRE_EMPHASIS_6;
2539 		case DP_TRAIN_VOLTAGE_SWING_800:
2540 			return DP_TRAIN_PRE_EMPHASIS_3_5;
2541 		case DP_TRAIN_VOLTAGE_SWING_1200:
2542 		default:
2543 			return DP_TRAIN_PRE_EMPHASIS_0;
2544 		}
2545 	} else if (IS_GEN7(dev) && port == PORT_A) {
2546 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2547 		case DP_TRAIN_VOLTAGE_SWING_400:
2548 			return DP_TRAIN_PRE_EMPHASIS_6;
2549 		case DP_TRAIN_VOLTAGE_SWING_600:
2550 		case DP_TRAIN_VOLTAGE_SWING_800:
2551 			return DP_TRAIN_PRE_EMPHASIS_3_5;
2552 		default:
2553 			return DP_TRAIN_PRE_EMPHASIS_0;
2554 		}
2555 	} else {
2556 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2557 		case DP_TRAIN_VOLTAGE_SWING_400:
2558 			return DP_TRAIN_PRE_EMPHASIS_6;
2559 		case DP_TRAIN_VOLTAGE_SWING_600:
2560 			return DP_TRAIN_PRE_EMPHASIS_6;
2561 		case DP_TRAIN_VOLTAGE_SWING_800:
2562 			return DP_TRAIN_PRE_EMPHASIS_3_5;
2563 		case DP_TRAIN_VOLTAGE_SWING_1200:
2564 		default:
2565 			return DP_TRAIN_PRE_EMPHASIS_0;
2566 		}
2567 	}
2568 }
2569 
2570 static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2571 {
2572 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2573 	struct drm_i915_private *dev_priv = dev->dev_private;
2574 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2575 	struct intel_crtc *intel_crtc =
2576 		to_intel_crtc(dport->base.base.crtc);
2577 	unsigned long demph_reg_value, preemph_reg_value,
2578 		uniqtranscale_reg_value;
2579 	uint8_t train_set = intel_dp->train_set[0];
2580 	enum dpio_channel port = vlv_dport_to_channel(dport);
2581 	int pipe = intel_crtc->pipe;
2582 
2583 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2584 	case DP_TRAIN_PRE_EMPHASIS_0:
2585 		preemph_reg_value = 0x0004000;
2586 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2587 		case DP_TRAIN_VOLTAGE_SWING_400:
2588 			demph_reg_value = 0x2B405555;
2589 			uniqtranscale_reg_value = 0x552AB83A;
2590 			break;
2591 		case DP_TRAIN_VOLTAGE_SWING_600:
2592 			demph_reg_value = 0x2B404040;
2593 			uniqtranscale_reg_value = 0x5548B83A;
2594 			break;
2595 		case DP_TRAIN_VOLTAGE_SWING_800:
2596 			demph_reg_value = 0x2B245555;
2597 			uniqtranscale_reg_value = 0x5560B83A;
2598 			break;
2599 		case DP_TRAIN_VOLTAGE_SWING_1200:
2600 			demph_reg_value = 0x2B405555;
2601 			uniqtranscale_reg_value = 0x5598DA3A;
2602 			break;
2603 		default:
2604 			return 0;
2605 		}
2606 		break;
2607 	case DP_TRAIN_PRE_EMPHASIS_3_5:
2608 		preemph_reg_value = 0x0002000;
2609 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2610 		case DP_TRAIN_VOLTAGE_SWING_400:
2611 			demph_reg_value = 0x2B404040;
2612 			uniqtranscale_reg_value = 0x5552B83A;
2613 			break;
2614 		case DP_TRAIN_VOLTAGE_SWING_600:
2615 			demph_reg_value = 0x2B404848;
2616 			uniqtranscale_reg_value = 0x5580B83A;
2617 			break;
2618 		case DP_TRAIN_VOLTAGE_SWING_800:
2619 			demph_reg_value = 0x2B404040;
2620 			uniqtranscale_reg_value = 0x55ADDA3A;
2621 			break;
2622 		default:
2623 			return 0;
2624 		}
2625 		break;
2626 	case DP_TRAIN_PRE_EMPHASIS_6:
2627 		preemph_reg_value = 0x0000000;
2628 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2629 		case DP_TRAIN_VOLTAGE_SWING_400:
2630 			demph_reg_value = 0x2B305555;
2631 			uniqtranscale_reg_value = 0x5570B83A;
2632 			break;
2633 		case DP_TRAIN_VOLTAGE_SWING_600:
2634 			demph_reg_value = 0x2B2B4040;
2635 			uniqtranscale_reg_value = 0x55ADDA3A;
2636 			break;
2637 		default:
2638 			return 0;
2639 		}
2640 		break;
2641 	case DP_TRAIN_PRE_EMPHASIS_9_5:
2642 		preemph_reg_value = 0x0006000;
2643 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2644 		case DP_TRAIN_VOLTAGE_SWING_400:
2645 			demph_reg_value = 0x1B405555;
2646 			uniqtranscale_reg_value = 0x55ADDA3A;
2647 			break;
2648 		default:
2649 			return 0;
2650 		}
2651 		break;
2652 	default:
2653 		return 0;
2654 	}
2655 
2656 	mutex_lock(&dev_priv->dpio_lock);
2657 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2658 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2659 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
2660 			 uniqtranscale_reg_value);
2661 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
2662 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
2663 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
2664 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
2665 	mutex_unlock(&dev_priv->dpio_lock);
2666 
2667 	return 0;
2668 }
2669 
2670 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2671 {
2672 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2673 	struct drm_i915_private *dev_priv = dev->dev_private;
2674 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2675 	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
2676 	u32 deemph_reg_value, margin_reg_value, val;
2677 	uint8_t train_set = intel_dp->train_set[0];
2678 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2679 	enum i915_pipe pipe = intel_crtc->pipe;
2680 	int i;
2681 
2682 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2683 	case DP_TRAIN_PRE_EMPHASIS_0:
2684 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2685 		case DP_TRAIN_VOLTAGE_SWING_400:
2686 			deemph_reg_value = 128;
2687 			margin_reg_value = 52;
2688 			break;
2689 		case DP_TRAIN_VOLTAGE_SWING_600:
2690 			deemph_reg_value = 128;
2691 			margin_reg_value = 77;
2692 			break;
2693 		case DP_TRAIN_VOLTAGE_SWING_800:
2694 			deemph_reg_value = 128;
2695 			margin_reg_value = 102;
2696 			break;
2697 		case DP_TRAIN_VOLTAGE_SWING_1200:
2698 			deemph_reg_value = 128;
2699 			margin_reg_value = 154;
2700 			/* FIXME extra to set for 1200 */
2701 			break;
2702 		default:
2703 			return 0;
2704 		}
2705 		break;
2706 	case DP_TRAIN_PRE_EMPHASIS_3_5:
2707 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2708 		case DP_TRAIN_VOLTAGE_SWING_400:
2709 			deemph_reg_value = 85;
2710 			margin_reg_value = 78;
2711 			break;
2712 		case DP_TRAIN_VOLTAGE_SWING_600:
2713 			deemph_reg_value = 85;
2714 			margin_reg_value = 116;
2715 			break;
2716 		case DP_TRAIN_VOLTAGE_SWING_800:
2717 			deemph_reg_value = 85;
2718 			margin_reg_value = 154;
2719 			break;
2720 		default:
2721 			return 0;
2722 		}
2723 		break;
2724 	case DP_TRAIN_PRE_EMPHASIS_6:
2725 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2726 		case DP_TRAIN_VOLTAGE_SWING_400:
2727 			deemph_reg_value = 64;
2728 			margin_reg_value = 104;
2729 			break;
2730 		case DP_TRAIN_VOLTAGE_SWING_600:
2731 			deemph_reg_value = 64;
2732 			margin_reg_value = 154;
2733 			break;
2734 		default:
2735 			return 0;
2736 		}
2737 		break;
2738 	case DP_TRAIN_PRE_EMPHASIS_9_5:
2739 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2740 		case DP_TRAIN_VOLTAGE_SWING_400:
2741 			deemph_reg_value = 43;
2742 			margin_reg_value = 154;
2743 			break;
2744 		default:
2745 			return 0;
2746 		}
2747 		break;
2748 	default:
2749 		return 0;
2750 	}
2751 
2752 	mutex_lock(&dev_priv->dpio_lock);
2753 
2754 	/* Clear calc init */
2755 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2756 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2757 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2758 
2759 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2760 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2761 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2762 
2763 	/* Program swing deemph */
2764 	for (i = 0; i < 4; i++) {
2765 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
2766 		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
2767 		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
2768 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
2769 	}
2770 
2771 	/* Program swing margin */
2772 	for (i = 0; i < 4; i++) {
2773 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2774 		val &= ~DPIO_SWING_MARGIN_MASK;
2775 		val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
2776 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2777 	}
2778 
2779 	/* Disable unique transition scale */
2780 	for (i = 0; i < 4; i++) {
2781 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2782 		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
2783 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2784 	}
2785 
2786 	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
2787 			== DP_TRAIN_PRE_EMPHASIS_0) &&
2788 		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
2789 			== DP_TRAIN_VOLTAGE_SWING_1200)) {
2790 
2791 		/*
2792 		 * The document says bit 27 should be set for ch0 and bit 26
2793 		 * for ch1, which might be a typo in the doc.
2794 		 * For now, for this unique transition scale selection, set bit
2795 		 * 27 for both ch0 and ch1.
2796 		 */
2797 		for (i = 0; i < 4; i++) {
2798 			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2799 			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
2800 			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2801 		}
2802 
2803 		for (i = 0; i < 4; i++) {
2804 			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2805 			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2806 			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2807 			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2808 		}
2809 	}
2810 
2811 	/* Start swing calculation */
2812 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2813 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2814 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2815 
2816 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2817 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2818 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2819 
2820 	/* LRC Bypass */
2821 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
2822 	val |= DPIO_LRC_BYPASS;
2823 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
2824 
2825 	mutex_unlock(&dev_priv->dpio_lock);
2826 
2827 	return 0;
2828 }
2829 
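/*
 * Compute the next drive settings from the sink's adjustment requests:
 * take the highest voltage swing and pre-emphasis requested on any lane,
 * clamp them to the source's limits, and set the MAX_*_REACHED flags so
 * the sink knows not to request more.
 */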
2830 static void
2831 intel_get_adjust_train(struct intel_dp *intel_dp,
2832 		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
2833 {
2834 	uint8_t v = 0;
2835 	uint8_t p = 0;
2836 	int lane;
2837 	uint8_t voltage_max;
2838 	uint8_t preemph_max;
2839 
2840 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
2841 		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
2842 		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
2843 
2844 		if (this_v > v)
2845 			v = this_v;
2846 		if (this_p > p)
2847 			p = this_p;
2848 	}
2849 
2850 	voltage_max = intel_dp_voltage_max(intel_dp);
2851 	if (v >= voltage_max)
2852 		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2853 
2854 	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
2855 	if (p >= preemph_max)
2856 		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2857 
2858 	for (lane = 0; lane < 4; lane++)
2859 		intel_dp->train_set[lane] = v | p;
2860 }
2861 
2862 static uint32_t
2863 intel_gen4_signal_levels(uint8_t train_set)
2864 {
2865 	uint32_t	signal_levels = 0;
2866 
2867 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2868 	case DP_TRAIN_VOLTAGE_SWING_400:
2869 	default:
2870 		signal_levels |= DP_VOLTAGE_0_4;
2871 		break;
2872 	case DP_TRAIN_VOLTAGE_SWING_600:
2873 		signal_levels |= DP_VOLTAGE_0_6;
2874 		break;
2875 	case DP_TRAIN_VOLTAGE_SWING_800:
2876 		signal_levels |= DP_VOLTAGE_0_8;
2877 		break;
2878 	case DP_TRAIN_VOLTAGE_SWING_1200:
2879 		signal_levels |= DP_VOLTAGE_1_2;
2880 		break;
2881 	}
2882 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2883 	case DP_TRAIN_PRE_EMPHASIS_0:
2884 	default:
2885 		signal_levels |= DP_PRE_EMPHASIS_0;
2886 		break;
2887 	case DP_TRAIN_PRE_EMPHASIS_3_5:
2888 		signal_levels |= DP_PRE_EMPHASIS_3_5;
2889 		break;
2890 	case DP_TRAIN_PRE_EMPHASIS_6:
2891 		signal_levels |= DP_PRE_EMPHASIS_6;
2892 		break;
2893 	case DP_TRAIN_PRE_EMPHASIS_9_5:
2894 		signal_levels |= DP_PRE_EMPHASIS_9_5;
2895 		break;
2896 	}
2897 	return signal_levels;
2898 }
2899 
2900 /* Gen6's DP voltage swing and pre-emphasis control */
2901 static uint32_t
2902 intel_gen6_edp_signal_levels(uint8_t train_set)
2903 {
2904 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2905 					 DP_TRAIN_PRE_EMPHASIS_MASK);
2906 	switch (signal_levels) {
2907 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2908 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2909 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2910 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2911 		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
2912 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2913 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2914 		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
2915 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2916 	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2917 		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
2918 	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2919 	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2920 		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
2921 	default:
2922 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2923 			      "0x%x\n", signal_levels);
2924 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2925 	}
2926 }
2927 
2928 /* Gen7's DP voltage swing and pre-emphasis control */
2929 static uint32_t
2930 intel_gen7_edp_signal_levels(uint8_t train_set)
2931 {
2932 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2933 					 DP_TRAIN_PRE_EMPHASIS_MASK);
2934 	switch (signal_levels) {
2935 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2936 		return EDP_LINK_TRAIN_400MV_0DB_IVB;
2937 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2938 		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
2939 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2940 		return EDP_LINK_TRAIN_400MV_6DB_IVB;
2941 
2942 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2943 		return EDP_LINK_TRAIN_600MV_0DB_IVB;
2944 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2945 		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
2946 
2947 	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2948 		return EDP_LINK_TRAIN_800MV_0DB_IVB;
2949 	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2950 		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
2951 
2952 	default:
2953 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2954 			      "0x%x\n", signal_levels);
2955 		return EDP_LINK_TRAIN_500MV_0DB_IVB;
2956 	}
2957 }
2958 
2959 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
2960 static uint32_t
2961 intel_hsw_signal_levels(uint8_t train_set)
2962 {
2963 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2964 					 DP_TRAIN_PRE_EMPHASIS_MASK);
2965 	switch (signal_levels) {
2966 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2967 		return DDI_BUF_EMP_400MV_0DB_HSW;
2968 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2969 		return DDI_BUF_EMP_400MV_3_5DB_HSW;
2970 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2971 		return DDI_BUF_EMP_400MV_6DB_HSW;
2972 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
2973 		return DDI_BUF_EMP_400MV_9_5DB_HSW;
2974 
2975 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2976 		return DDI_BUF_EMP_600MV_0DB_HSW;
2977 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2978 		return DDI_BUF_EMP_600MV_3_5DB_HSW;
2979 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2980 		return DDI_BUF_EMP_600MV_6DB_HSW;
2981 
2982 	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2983 		return DDI_BUF_EMP_800MV_0DB_HSW;
2984 	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2985 		return DDI_BUF_EMP_800MV_3_5DB_HSW;
2986 	default:
2987 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2988 			      "0x%x\n", signal_levels);
2989 		return DDI_BUF_EMP_400MV_0DB_HSW;
2990 	}
2991 }
2992 
2993 /* Properly updates "DP" with the correct signal levels. */
2994 static void
2995 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2996 {
2997 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2998 	enum port port = intel_dig_port->port;
2999 	struct drm_device *dev = intel_dig_port->base.base.dev;
3000 	uint32_t signal_levels, mask;
3001 	uint8_t train_set = intel_dp->train_set[0];
3002 
3003 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3004 		signal_levels = intel_hsw_signal_levels(train_set);
3005 		mask = DDI_BUF_EMP_MASK;
3006 	} else if (IS_CHERRYVIEW(dev)) {
3007 		signal_levels = intel_chv_signal_levels(intel_dp);
3008 		mask = 0;
3009 	} else if (IS_VALLEYVIEW(dev)) {
3010 		signal_levels = intel_vlv_signal_levels(intel_dp);
3011 		mask = 0;
3012 	} else if (IS_GEN7(dev) && port == PORT_A) {
3013 		signal_levels = intel_gen7_edp_signal_levels(train_set);
3014 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3015 	} else if (IS_GEN6(dev) && port == PORT_A) {
3016 		signal_levels = intel_gen6_edp_signal_levels(train_set);
3017 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3018 	} else {
3019 		signal_levels = intel_gen4_signal_levels(train_set);
3020 		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3021 	}
3022 
3023 	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3024 
3025 	*DP = (*DP & ~mask) | signal_levels;
3026 }
3027 
3028 static bool
3029 intel_dp_set_link_train(struct intel_dp *intel_dp,
3030 			uint32_t *DP,
3031 			uint8_t dp_train_pat)
3032 {
3033 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3034 	struct drm_device *dev = intel_dig_port->base.base.dev;
3035 	struct drm_i915_private *dev_priv = dev->dev_private;
3036 	enum port port = intel_dig_port->port;
3037 	uint8_t buf[sizeof(intel_dp->train_set) + 1];
3038 	int ret, len;
3039 
3040 	if (HAS_DDI(dev)) {
3041 		uint32_t temp = I915_READ(DP_TP_CTL(port));
3042 
3043 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3044 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3045 		else
3046 			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3047 
3048 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3049 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3050 		case DP_TRAINING_PATTERN_DISABLE:
3051 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3052 
3053 			break;
3054 		case DP_TRAINING_PATTERN_1:
3055 			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3056 			break;
3057 		case DP_TRAINING_PATTERN_2:
3058 			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3059 			break;
3060 		case DP_TRAINING_PATTERN_3:
3061 			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3062 			break;
3063 		}
3064 		I915_WRITE(DP_TP_CTL(port), temp);
3065 
3066 	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3067 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
3068 
3069 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3070 		case DP_TRAINING_PATTERN_DISABLE:
3071 			*DP |= DP_LINK_TRAIN_OFF_CPT;
3072 			break;
3073 		case DP_TRAINING_PATTERN_1:
3074 			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
3075 			break;
3076 		case DP_TRAINING_PATTERN_2:
3077 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
3078 			break;
3079 		case DP_TRAINING_PATTERN_3:
3080 			DRM_ERROR("DP training pattern 3 not supported\n");
3081 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
3082 			break;
3083 		}
3084 
3085 	} else {
3086 		*DP &= ~DP_LINK_TRAIN_MASK;
3087 
3088 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3089 		case DP_TRAINING_PATTERN_DISABLE:
3090 			*DP |= DP_LINK_TRAIN_OFF;
3091 			break;
3092 		case DP_TRAINING_PATTERN_1:
3093 			*DP |= DP_LINK_TRAIN_PAT_1;
3094 			break;
3095 		case DP_TRAINING_PATTERN_2:
3096 			*DP |= DP_LINK_TRAIN_PAT_2;
3097 			break;
3098 		case DP_TRAINING_PATTERN_3:
3099 			DRM_ERROR("DP training pattern 3 not supported\n");
3100 			*DP |= DP_LINK_TRAIN_PAT_2;
3101 			break;
3102 		}
3103 	}
3104 
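	/*
	 * The pattern selected on the source side above must be mirrored to
	 * the sink via the DP_TRAINING_PATTERN_SET write below.
	 */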
3105 	I915_WRITE(intel_dp->output_reg, *DP);
3106 	POSTING_READ(intel_dp->output_reg);
3107 
3108 	buf[0] = dp_train_pat;
3109 	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3110 	    DP_TRAINING_PATTERN_DISABLE) {
3111 		/* don't write DP_TRAINING_LANEx_SET on disable */
3112 		len = 1;
3113 	} else {
3114 		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3115 		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3116 		len = intel_dp->lane_count + 1;
3117 	}
3118 
3119 	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
3120 					buf, len);
3121 
3122 	return ret == len;
3123 }
3124 
3125 static bool
3126 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3127 			uint8_t dp_train_pat)
3128 {
3129 	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3130 	intel_dp_set_signal_levels(intel_dp, DP);
3131 	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3132 }
3133 
3134 static bool
3135 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3136 			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
3137 {
3138 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3139 	struct drm_device *dev = intel_dig_port->base.base.dev;
3140 	struct drm_i915_private *dev_priv = dev->dev_private;
3141 	int ret;
3142 
3143 	intel_get_adjust_train(intel_dp, link_status);
3144 	intel_dp_set_signal_levels(intel_dp, DP);
3145 
3146 	I915_WRITE(intel_dp->output_reg, *DP);
3147 	POSTING_READ(intel_dp->output_reg);
3148 
3149 	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
3150 					intel_dp->train_set,
3151 					intel_dp->lane_count);
3152 
3153 	return ret == intel_dp->lane_count;
3154 }
3155 
3156 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3157 {
3158 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3159 	struct drm_device *dev = intel_dig_port->base.base.dev;
3160 	struct drm_i915_private *dev_priv = dev->dev_private;
3161 	enum port port = intel_dig_port->port;
3162 	uint32_t val;
3163 
3164 	if (!HAS_DDI(dev))
3165 		return;
3166 
3167 	val = I915_READ(DP_TP_CTL(port));
3168 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3169 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3170 	I915_WRITE(DP_TP_CTL(port), val);
3171 
3172 	/*
3173 	 * On PORT_A we can have only eDP in SST mode. There the only reason
3174 	 * we need to set idle transmission mode is to work around a HW issue
3175 	 * where we enable the pipe while not in idle link-training mode.
3176 	 * In this case we are required to wait for a minimum number of
3177 	 * idle patterns to be sent.
3178 	 */
3179 	if (port == PORT_A)
3180 		return;
3181 
3182 	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3183 		     1))
3184 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3185 }
3186 
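/*
 * Clock recovery is a negotiation loop: transmit training pattern 1 with
 * scrambling disabled, poll the sink's link status, and apply whatever
 * drive-setting adjustments the sink requests. We give up after 5 full
 * retries at maximum voltage swing, or 5 tries at one voltage level
 * without achieving clock recovery.
 */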
3187 /* Enable corresponding port and start training pattern 1 */
3188 void
3189 intel_dp_start_link_train(struct intel_dp *intel_dp)
3190 {
3191 	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3192 	struct drm_device *dev = encoder->dev;
3193 	int i;
3194 	uint8_t voltage;
3195 	int voltage_tries, loop_tries;
3196 	uint32_t DP = intel_dp->DP;
3197 	uint8_t link_config[2];
3198 
3199 	if (HAS_DDI(dev))
3200 		intel_ddi_prepare_link_retrain(encoder);
3201 
3202 	/* Write the link configuration data */
3203 	link_config[0] = intel_dp->link_bw;
3204 	link_config[1] = intel_dp->lane_count;
3205 	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3206 		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3207 	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
3208 
3209 	link_config[0] = 0;
3210 	link_config[1] = DP_SET_ANSI_8B10B;
3211 	intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
3212 
3213 	DP |= DP_PORT_EN;
3214 
3215 	/* clock recovery */
3216 	if (!intel_dp_reset_link_train(intel_dp, &DP,
3217 				       DP_TRAINING_PATTERN_1 |
3218 				       DP_LINK_SCRAMBLING_DISABLE)) {
3219 		DRM_ERROR("failed to enable link training\n");
3220 		return;
3221 	}
3222 
3223 	voltage = 0xff;
3224 	voltage_tries = 0;
3225 	loop_tries = 0;
3226 	for (;;) {
3227 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3228 
3229 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3230 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3231 			DRM_ERROR("failed to get link status\n");
3232 			break;
3233 		}
3234 
3235 		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3236 			DRM_DEBUG_KMS("clock recovery OK\n");
3237 			break;
3238 		}
3239 
3240 		/* Check to see if we've tried the max voltage */
3241 		for (i = 0; i < intel_dp->lane_count; i++)
3242 			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3243 				break;
3244 		if (i == intel_dp->lane_count) {
3245 			++loop_tries;
3246 			if (loop_tries == 5) {
3247 				DRM_ERROR("too many full retries, give up\n");
3248 				break;
3249 			}
3250 			intel_dp_reset_link_train(intel_dp, &DP,
3251 						  DP_TRAINING_PATTERN_1 |
3252 						  DP_LINK_SCRAMBLING_DISABLE);
3253 			voltage_tries = 0;
3254 			continue;
3255 		}
3256 
3257 		/* Check to see if we've tried the same voltage 5 times */
3258 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3259 			++voltage_tries;
3260 			if (voltage_tries == 5) {
3261 				DRM_ERROR("too many voltage retries, give up\n");
3262 				break;
3263 			}
3264 		} else
3265 			voltage_tries = 0;
3266 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3267 
3268 		/* Update training set as requested by target */
3269 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3270 			DRM_ERROR("failed to update link training\n");
3271 			break;
3272 		}
3273 	}
3274 
3275 	intel_dp->DP = DP;
3276 }
3277 
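/*
 * Channel equalization follows clock recovery: transmit pattern 2 (or 3
 * for HBR2/TPS3-capable sinks), verify that clock recovery still holds,
 * and retry equalization up to 5 times before falling back to a full
 * retrain, bounded overall by cr_tries.
 */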
3278 void
3279 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3280 {
3281 	bool channel_eq = false;
3282 	int tries, cr_tries;
3283 	uint32_t DP = intel_dp->DP;
3284 	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3285 
3286 	/* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
3287 	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3288 		training_pattern = DP_TRAINING_PATTERN_3;
3289 
3290 	/* channel equalization */
3291 	if (!intel_dp_set_link_train(intel_dp, &DP,
3292 				     training_pattern |
3293 				     DP_LINK_SCRAMBLING_DISABLE)) {
3294 		DRM_ERROR("failed to start channel equalization\n");
3295 		return;
3296 	}
3297 
3298 	tries = 0;
3299 	cr_tries = 0;
3300 	channel_eq = false;
3301 	for (;;) {
3302 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3303 
3304 		if (cr_tries > 5) {
3305 			DRM_ERROR("failed to train DP, aborting\n");
3306 			break;
3307 		}
3308 
3309 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3310 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3311 			DRM_ERROR("failed to get link status\n");
3312 			break;
3313 		}
3314 
3315 		/* Make sure clock is still ok */
3316 		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3317 			intel_dp_start_link_train(intel_dp);
3318 			intel_dp_set_link_train(intel_dp, &DP,
3319 						training_pattern |
3320 						DP_LINK_SCRAMBLING_DISABLE);
3321 			cr_tries++;
3322 			continue;
3323 		}
3324 
3325 		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3326 			channel_eq = true;
3327 			break;
3328 		}
3329 
3330 		/* Try 5 times, then try clock recovery if that fails */
3331 		if (tries > 5) {
3332 			intel_dp_link_down(intel_dp);
3333 			intel_dp_start_link_train(intel_dp);
3334 			intel_dp_set_link_train(intel_dp, &DP,
3335 						training_pattern |
3336 						DP_LINK_SCRAMBLING_DISABLE);
3337 			tries = 0;
3338 			cr_tries++;
3339 			continue;
3340 		}
3341 
3342 		/* Update training set as requested by target */
3343 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3344 			DRM_ERROR("failed to update link training\n");
3345 			break;
3346 		}
3347 		++tries;
3348 	}
3349 
3350 	intel_dp_set_idle_link_train(intel_dp);
3351 
3352 	intel_dp->DP = DP;
3353 
3354 	if (channel_eq)
3355 		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3357 }
3358 
3359 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3360 {
3361 	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3362 				DP_TRAINING_PATTERN_DISABLE);
3363 }
3364 
3365 static void
3366 intel_dp_link_down(struct intel_dp *intel_dp)
3367 {
3368 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3369 	enum port port = intel_dig_port->port;
3370 	struct drm_device *dev = intel_dig_port->base.base.dev;
3371 	struct drm_i915_private *dev_priv = dev->dev_private;
3372 	struct intel_crtc *intel_crtc =
3373 		to_intel_crtc(intel_dig_port->base.base.crtc);
3374 	uint32_t DP = intel_dp->DP;
3375 
3376 	if (WARN_ON(HAS_DDI(dev)))
3377 		return;
3378 
3379 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3380 		return;
3381 
3382 	DRM_DEBUG_KMS("\n");
3383 
3384 	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3385 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3386 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3387 	} else {
3388 		DP &= ~DP_LINK_TRAIN_MASK;
3389 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3390 	}
3391 	POSTING_READ(intel_dp->output_reg);
3392 
3393 	if (HAS_PCH_IBX(dev) &&
3394 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3395 		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
3396 
3397 		/* Hardware workaround: leaving our transcoder select
3398 		 * set to transcoder B while it's off will prevent the
3399 		 * corresponding HDMI output on transcoder A.
3400 		 *
3401 		 * Combine this with another hardware workaround:
3402 		 * transcoder select bit can only be cleared while the
3403 		 * port is enabled.
3404 		 */
3405 		DP &= ~DP_PIPEB_SELECT;
3406 		I915_WRITE(intel_dp->output_reg, DP);
3407 
3408 		/* Changes to enable or select take place the vblank
3409 		 * after being written.
3410 		 */
3411 		if (WARN_ON(crtc == NULL)) {
3412 			/* We should never try to disable a port without a crtc
3413 			 * attached. For paranoia keep the code around for a
3414 			 * bit. */
3415 			POSTING_READ(intel_dp->output_reg);
3416 			msleep(50);
3417 		} else
3418 			intel_wait_for_vblank(dev, intel_crtc->pipe);
3419 	}
3420 
3421 	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3422 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3423 	POSTING_READ(intel_dp->output_reg);
3424 	msleep(intel_dp->panel_power_down_delay);
3425 }
3426 
3427 static bool
3428 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3429 {
3430 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3431 	struct drm_device *dev = dig_port->base.base.dev;
3432 	struct drm_i915_private *dev_priv = dev->dev_private;
3433 
3434 	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
3435 
3436 	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
3437 					   sizeof(intel_dp->dpcd)) == 0)
3438 		return false; /* aux transfer failed */
3439 
3440 	ksnprintf(dpcd_hex_dump,
3441 		  sizeof(dpcd_hex_dump),
3442 		  "%02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx",
3443 		  intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
3444 		  intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
3445 		  intel_dp->dpcd[6], intel_dp->dpcd[7]);
3446 	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
3447 
3448 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3449 		return false; /* DPCD not present */
3450 
3451 	/* Check if the panel supports PSR */
3452 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3453 	if (is_edp(intel_dp)) {
3454 		intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
3455 					       intel_dp->psr_dpcd,
3456 					       sizeof(intel_dp->psr_dpcd));
3457 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3458 			dev_priv->psr.sink_support = true;
3459 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3460 		}
3461 	}
3462 
3463 	/* Training Pattern 3 support */
3464 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3465 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
3466 		intel_dp->use_tps3 = true;
3467 		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3468 	} else
3469 		intel_dp->use_tps3 = false;
3470 
3471 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3472 	      DP_DWN_STRM_PORT_PRESENT))
3473 		return true; /* native DP sink */
3474 
3475 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3476 		return true; /* no per-port downstream info */
3477 
3478 	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
3479 					   intel_dp->downstream_ports,
3480 					   DP_MAX_DOWNSTREAM_PORTS) == 0)
3481 		return false; /* downstream port status fetch failed */
3482 
3483 	return true;
3484 }
3485 
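/*
 * For illustration only: a hedged sketch of how the raw capability bytes
 * cached above are commonly decoded, per the DPCD field definitions in
 * drm_dp_helper.h. The helper name is hypothetical.
 */
#if 0
static void dpcd_caps_sketch(const uint8_t dpcd[DP_RECEIVER_CAP_SIZE])
{
	/* 0x06 = 1.62 GHz, 0x0a = 2.7 GHz, 0x14 = 5.4 GHz per lane */
	uint8_t max_link_rate = dpcd[DP_MAX_LINK_RATE];
	/* the low 5 bits hold the lane count: 1, 2 or 4 */
	uint8_t max_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
	bool enhanced_framing = dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP;

	DRM_DEBUG_KMS("link bw %02x, %d lanes, enhanced framing %d\n",
		      max_link_rate, max_lanes, enhanced_framing);
}
#endif
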
3486 static void
3487 intel_dp_probe_oui(struct intel_dp *intel_dp)
3488 {
3489 	u8 buf[3];
3490 
3491 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3492 		return;
3493 
3494 	intel_edp_panel_vdd_on(intel_dp);
3495 
3496 	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
3497 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3498 			      buf[0], buf[1], buf[2]);
3499 
3500 	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
3501 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3502 			      buf[0], buf[1], buf[2]);
3503 
3504 	edp_panel_vdd_off(intel_dp, false);
3505 }
3506 
3507 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3508 {
3509 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3510 	struct drm_device *dev = intel_dig_port->base.base.dev;
3511 	struct intel_crtc *intel_crtc =
3512 		to_intel_crtc(intel_dig_port->base.base.crtc);
3513 	u8 buf[1];
3514 
3515 	if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1))
3516 		return -EAGAIN;
3517 
3518 	if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
3519 		return -ENOTTY;
3520 
3521 	if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK,
3522 					 DP_TEST_SINK_START))
3523 		return -EAGAIN;
3524 
3525 	/* Wait 2 vblanks to be sure we will have the correct CRC value */
3526 	intel_wait_for_vblank(dev, intel_crtc->pipe);
3527 	intel_wait_for_vblank(dev, intel_crtc->pipe);
3528 
3529 	if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6))
3530 		return -EAGAIN;
3531 
3532 	intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0);
3533 	return 0;
3534 }
3535 
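/*
 * For illustration only: a hypothetical debugfs-style consumer of
 * intel_dp_sink_crc(). The six bytes returned are the three little-endian
 * 16-bit per-component test CRCs starting at DP_TEST_CRC_R_CR; the caller
 * shown here is an assumption, not driver code.
 */
#if 0
static void sink_crc_usage_sketch(struct intel_dp *intel_dp)
{
	u8 crc[6];

	if (intel_dp_sink_crc(intel_dp, crc) == 0)
		DRM_DEBUG_KMS("sink CRC R/Cr %04x G/Y %04x B/Cb %04x\n",
			      crc[0] | (crc[1] << 8),
			      crc[2] | (crc[3] << 8),
			      crc[4] | (crc[5] << 8));
}
#endif
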
3536 static bool
3537 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3538 {
3539 	int ret;
3540 
3541 	ret = intel_dp_aux_native_read_retry(intel_dp,
3542 					     DP_DEVICE_SERVICE_IRQ_VECTOR,
3543 					     sink_irq_vector, 1);
3544 	if (!ret)
3545 		return false;
3546 
3547 	return true;
3548 }
3549 
3550 static void
3551 intel_dp_handle_test_request(struct intel_dp *intel_dp)
3552 {
3553 	/* NAK by default */
3554 	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
3555 }
3556 
3557 /*
3558  * According to DP spec
3559  * 5.1.2:
3560  *  1. Read DPCD
3561  *  2. Configure link according to Receiver Capabilities
3562  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
3563  *  4. Check link status on receipt of hot-plug interrupt
3564  */
3565 
3566 void
3567 intel_dp_check_link_status(struct intel_dp *intel_dp)
3568 {
3569 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3570 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3571 	u8 sink_irq_vector;
3572 	u8 link_status[DP_LINK_STATUS_SIZE];
3573 
3574 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3575 
3576 	if (!intel_encoder->connectors_active)
3577 		return;
3578 
3579 	if (WARN_ON(!intel_encoder->base.crtc))
3580 		return;
3581 
3582 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3583 		return;
3584 
3585 	/* Try to read receiver status if the link appears to be up */
3586 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
3587 		return;
3588 	}
3589 
3590 	/* Now read the DPCD to see if it's actually running */
3591 	if (!intel_dp_get_dpcd(intel_dp)) {
3592 		return;
3593 	}
3594 
3595 	/* Try to read the source of the interrupt */
3596 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3597 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3598 		/* Clear interrupt source */
3599 		intel_dp_aux_native_write_1(intel_dp,
3600 					    DP_DEVICE_SERVICE_IRQ_VECTOR,
3601 					    sink_irq_vector);
3602 
3603 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3604 			intel_dp_handle_test_request(intel_dp);
3605 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3606 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
3607 	}
3608 
3609 	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3610 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
3611 			      intel_encoder->base.name);
3612 		intel_dp_start_link_train(intel_dp);
3613 		intel_dp_complete_link_train(intel_dp);
3614 		intel_dp_stop_link_train(intel_dp);
3615 	}
3616 }
3617 
3618 /* XXX this is probably wrong for multiple downstream ports */
3619 static enum drm_connector_status
3620 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
3621 {
3622 	uint8_t *dpcd = intel_dp->dpcd;
3623 	uint8_t type;
3624 
3625 	if (!intel_dp_get_dpcd(intel_dp))
3626 		return connector_status_disconnected;
3627 
3628 	/* if there's no downstream port, we're done */
3629 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
3630 		return connector_status_connected;
3631 
3632 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
3633 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3634 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
3635 		uint8_t reg;
3636 		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
3637 						    &reg, 1))
3638 			return connector_status_unknown;
3639 		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
3640 					      : connector_status_disconnected;
3641 	}
3642 
3643 	/* If no HPD, poke DDC gently */
3644 	if (drm_probe_ddc(intel_dp->adapter))
3645 		return connector_status_connected;
3646 
3647 	/* Well we tried, say unknown for unreliable port types */
3648 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
3649 		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
3650 		if (type == DP_DS_PORT_TYPE_VGA ||
3651 		    type == DP_DS_PORT_TYPE_NON_EDID)
3652 			return connector_status_unknown;
3653 	} else {
3654 		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3655 			DP_DWN_STRM_PORT_TYPE_MASK;
3656 		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
3657 		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
3658 			return connector_status_unknown;
3659 	}
3660 
3661 	/* Anything else is out of spec, warn and ignore */
3662 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
3663 	return connector_status_disconnected;
3664 }
3665 
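/*
 * A note on the DP_GET_SINK_COUNT() decode above, assuming the definition
 * in drm_dp_helper.h: SINK_COUNT keeps its low six bits in bits 5:0 and
 * borrows bit 7 as the seventh count bit (bit 6 is CP_READY), so the
 * macro reassembles the count as ((reg & 0x80) >> 1) | (reg & 0x3f).
 */
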
3666 static enum drm_connector_status
3667 ironlake_dp_detect(struct intel_dp *intel_dp)
3668 {
3669 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3670 	struct drm_i915_private *dev_priv = dev->dev_private;
3671 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3672 	enum drm_connector_status status;
3673 
3674 	/* Can't disconnect eDP, but you can close the lid... */
3675 	if (is_edp(intel_dp)) {
3676 		status = intel_panel_detect(dev);
3677 		if (status == connector_status_unknown)
3678 			status = connector_status_connected;
3679 		return status;
3680 	}
3681 
3682 	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
3683 		return connector_status_disconnected;
3684 
3685 	return intel_dp_detect_dpcd(intel_dp);
3686 }
3687 
3688 static int g4x_digital_port_connected(struct drm_device *dev,
3689 				       struct intel_digital_port *intel_dig_port)
3690 {
3691 	struct drm_i915_private *dev_priv = dev->dev_private;
3692 	uint32_t bit;
3693 
3694 	if (IS_VALLEYVIEW(dev)) {
3695 		switch (intel_dig_port->port) {
3696 		case PORT_B:
3697 			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
3698 			break;
3699 		case PORT_C:
3700 			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
3701 			break;
3702 		case PORT_D:
3703 			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
3704 			break;
3705 		default:
3706 			return -EINVAL;
3707 		}
3708 	} else {
3709 		switch (intel_dig_port->port) {
3710 		case PORT_B:
3711 			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
3712 			break;
3713 		case PORT_C:
3714 			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
3715 			break;
3716 		case PORT_D:
3717 			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
3718 			break;
3719 		default:
3720 			return -EINVAL;
3721 		}
3722 	}
3723 
3724 	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
3725 		return 0;
3726 	return 1;
3727 }
3728 
3729 static enum drm_connector_status
3730 g4x_dp_detect(struct intel_dp *intel_dp)
3731 {
3732 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3733 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3734 	int ret;
3735 
3736 	/* Can't disconnect eDP, but you can close the lid... */
3737 	if (is_edp(intel_dp)) {
3738 		enum drm_connector_status status;
3739 
3740 		status = intel_panel_detect(dev);
3741 		if (status == connector_status_unknown)
3742 			status = connector_status_connected;
3743 		return status;
3744 	}
3745 
3746 	ret = g4x_digital_port_connected(dev, intel_dig_port);
3747 	if (ret == -EINVAL)
3748 		return connector_status_unknown;
3749 	else if (ret == 0)
3750 		return connector_status_disconnected;
3751 
3752 	return intel_dp_detect_dpcd(intel_dp);
3753 }
3754 
3755 static struct edid *
3756 intel_dp_get_edid(struct drm_connector *connector, struct device *adapter)
3757 {
3758 	struct intel_connector *intel_connector = to_intel_connector(connector);
3759 
3760 	/* use cached edid if we have one */
3761 	if (intel_connector->edid) {
3762 		/* invalid edid */
3763 		if (IS_ERR(intel_connector->edid))
3764 			return NULL;
3765 
3766 		return drm_edid_duplicate(intel_connector->edid);
3767 	}
3768 
3769 	return drm_get_edid(connector, adapter);
3770 }
3771 
3772 static int
3773 intel_dp_get_edid_modes(struct drm_connector *connector, struct device *adapter)
3774 {
3775 	struct intel_connector *intel_connector = to_intel_connector(connector);
3776 
3777 	/* use cached edid if we have one */
3778 	if (intel_connector->edid) {
3779 		/* invalid edid */
3780 		if (IS_ERR(intel_connector->edid))
3781 			return 0;
3782 
3783 		return intel_connector_update_modes(connector,
3784 						    intel_connector->edid);
3785 	}
3786 
3787 	return intel_ddc_get_modes(connector, adapter);
3788 }
3789 
3790 static enum drm_connector_status
3791 intel_dp_detect(struct drm_connector *connector, bool force)
3792 {
3793 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3794 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3795 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
3796 	struct drm_device *dev = connector->dev;
3797 	struct drm_i915_private *dev_priv = dev->dev_private;
3798 	enum drm_connector_status status;
3799 	enum intel_display_power_domain power_domain;
3800 	struct edid *edid = NULL;
3801 
3802 	power_domain = intel_display_port_power_domain(intel_encoder);
3803 	intel_display_power_get(dev_priv, power_domain);
3804 
3805 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3806 		      connector->base.id, connector->name);
3807 
3808 	intel_dp->has_audio = false;
3809 
3810 	if (HAS_PCH_SPLIT(dev))
3811 		status = ironlake_dp_detect(intel_dp);
3812 	else
3813 		status = g4x_dp_detect(intel_dp);
3814 
3815 	if (status != connector_status_connected)
3816 		goto out;
3817 
3818 	intel_dp_probe_oui(intel_dp);
3819 
3820 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
3821 		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
3822 	} else {
3823 		edid = intel_dp_get_edid(connector, intel_dp->adapter);
3824 		if (edid) {
3825 			intel_dp->has_audio = drm_detect_monitor_audio(edid);
3826 			kfree(edid);
3827 		}
3828 	}
3829 
3830 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
3831 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3832 	status = connector_status_connected;
3833 
3834 out:
3835 	intel_display_power_put(dev_priv, power_domain);
3836 	return status;
3837 }
3838 
3839 static int intel_dp_get_modes(struct drm_connector *connector)
3840 {
3841 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3842 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3843 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
3844 	struct intel_connector *intel_connector = to_intel_connector(connector);
3845 	struct drm_device *dev = connector->dev;
3846 	struct drm_i915_private *dev_priv = dev->dev_private;
3847 	enum intel_display_power_domain power_domain;
3848 	int ret;
3849 
3850 	/* We should parse the EDID data and find out if it has an audio
3851 	 * sink. */
3852 
3853 	power_domain = intel_display_port_power_domain(intel_encoder);
3854 	intel_display_power_get(dev_priv, power_domain);
3855 
3856 	ret = intel_dp_get_edid_modes(connector, intel_dp->adapter);
3857 	intel_display_power_put(dev_priv, power_domain);
3858 	if (ret)
3859 		return ret;
3860 
3861 	/* if eDP has no EDID, fall back to fixed mode */
3862 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
3863 		struct drm_display_mode *mode;
3864 		mode = drm_mode_duplicate(dev,
3865 					  intel_connector->panel.fixed_mode);
3866 		if (mode) {
3867 			drm_mode_probed_add(connector, mode);
3868 			return 1;
3869 		}
3870 	}
3871 	return 0;
3872 }
3873 
3874 static bool
3875 intel_dp_detect_audio(struct drm_connector *connector)
3876 {
3877 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3878 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3879 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
3880 	struct drm_device *dev = connector->dev;
3881 	struct drm_i915_private *dev_priv = dev->dev_private;
3882 	enum intel_display_power_domain power_domain;
3883 	struct edid *edid;
3884 	bool has_audio = false;
3885 
3886 	power_domain = intel_display_port_power_domain(intel_encoder);
3887 	intel_display_power_get(dev_priv, power_domain);
3888 
3889 	edid = intel_dp_get_edid(connector, intel_dp->adapter);
3890 	if (edid) {
3891 		has_audio = drm_detect_monitor_audio(edid);
3892 		kfree(edid);
3893 	}
3894 
3895 	intel_display_power_put(dev_priv, power_domain);
3896 
3897 	return has_audio;
3898 }
3899 
3900 static int
3901 intel_dp_set_property(struct drm_connector *connector,
3902 		      struct drm_property *property,
3903 		      uint64_t val)
3904 {
3905 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
3906 	struct intel_connector *intel_connector = to_intel_connector(connector);
3907 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
3908 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3909 	int ret;
3910 
3911 	ret = drm_object_property_set_value(&connector->base, property, val);
3912 	if (ret)
3913 		return ret;
3914 
3915 	if (property == dev_priv->force_audio_property) {
3916 		int i = val;
3917 		bool has_audio;
3918 
3919 		if (i == intel_dp->force_audio)
3920 			return 0;
3921 
3922 		intel_dp->force_audio = i;
3923 
3924 		if (i == HDMI_AUDIO_AUTO)
3925 			has_audio = intel_dp_detect_audio(connector);
3926 		else
3927 			has_audio = (i == HDMI_AUDIO_ON);
3928 
3929 		if (has_audio == intel_dp->has_audio)
3930 			return 0;
3931 
3932 		intel_dp->has_audio = has_audio;
3933 		goto done;
3934 	}
3935 
3936 	if (property == dev_priv->broadcast_rgb_property) {
3937 		bool old_auto = intel_dp->color_range_auto;
3938 		uint32_t old_range = intel_dp->color_range;
3939 
3940 		switch (val) {
3941 		case INTEL_BROADCAST_RGB_AUTO:
3942 			intel_dp->color_range_auto = true;
3943 			break;
3944 		case INTEL_BROADCAST_RGB_FULL:
3945 			intel_dp->color_range_auto = false;
3946 			intel_dp->color_range = 0;
3947 			break;
3948 		case INTEL_BROADCAST_RGB_LIMITED:
3949 			intel_dp->color_range_auto = false;
3950 			intel_dp->color_range = DP_COLOR_RANGE_16_235;
3951 			break;
3952 		default:
3953 			return -EINVAL;
3954 		}
3955 
3956 		if (old_auto == intel_dp->color_range_auto &&
3957 		    old_range == intel_dp->color_range)
3958 			return 0;
3959 
3960 		goto done;
3961 	}
3962 
3963 	if (is_edp(intel_dp) &&
3964 	    property == connector->dev->mode_config.scaling_mode_property) {
3965 		if (val == DRM_MODE_SCALE_NONE) {
3966 			DRM_DEBUG_KMS("scaling mode NONE not supported\n");
3967 			return -EINVAL;
3968 		}
3969 
3970 		if (intel_connector->panel.fitting_mode == val) {
3971 			/* the eDP scaling property is not changed */
3972 			return 0;
3973 		}
3974 		intel_connector->panel.fitting_mode = val;
3975 
3976 		goto done;
3977 	}
3978 
3979 	return -EINVAL;
3980 
3981 done:
3982 	if (intel_encoder->base.crtc)
3983 		intel_crtc_restore_mode(intel_encoder->base.crtc);
3984 
3985 	return 0;
3986 }
3987 
3988 static void
3989 intel_dp_connector_destroy(struct drm_connector *connector)
3990 {
3991 	struct intel_connector *intel_connector = to_intel_connector(connector);
3992 
3993 	if (!IS_ERR_OR_NULL(intel_connector->edid))
3994 		kfree(intel_connector->edid);
3995 
3996 	/* Can't call is_edp() since the encoder may have been destroyed
3997 	 * already. */
3998 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3999 		intel_panel_fini(&intel_connector->panel);
4000 
4001 	drm_connector_cleanup(connector);
4002 	kfree(connector);
4003 }
4004 
4005 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4006 {
4007 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4008 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4009 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4010 
4011 	if (intel_dp->dp_iic_bus != NULL) {
4012 		if (intel_dp->adapter != NULL) {
4013 			device_delete_child(intel_dp->dp_iic_bus,
4014 					    intel_dp->adapter);
4015 		}
4016 		device_delete_child(dev->dev, intel_dp->dp_iic_bus);
4017 	}
4018 
4019 	drm_encoder_cleanup(encoder);
4020 	if (is_edp(intel_dp)) {
4021 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4022 		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4023 		edp_panel_vdd_off_sync(intel_dp);
4024 		drm_modeset_unlock(&dev->mode_config.connection_mutex);
4025 #if 0
4026 		if (intel_dp->edp_notifier.notifier_call) {
4027 			unregister_reboot_notifier(&intel_dp->edp_notifier);
4028 			intel_dp->edp_notifier.notifier_call = NULL;
4029 		}
4030 #endif
4031 	}
4032 	kfree(intel_dig_port);
4033 }
4034 
4035 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4036 {
4037 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4038 
4039 	if (!is_edp(intel_dp))
4040 		return;
4041 
4042 	edp_panel_vdd_off_sync(intel_dp);
4043 }
4044 
4045 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4046 {
4047 	intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
4048 }
4049 
4050 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4051 	.dpms = intel_connector_dpms,
4052 	.detect = intel_dp_detect,
4053 	.fill_modes = drm_helper_probe_single_connector_modes,
4054 	.set_property = intel_dp_set_property,
4055 	.destroy = intel_dp_connector_destroy,
4056 };
4057 
4058 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4059 	.get_modes = intel_dp_get_modes,
4060 	.mode_valid = intel_dp_mode_valid,
4061 	.best_encoder = intel_best_encoder,
4062 };
4063 
4064 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4065 	.reset = intel_dp_encoder_reset,
4066 	.destroy = intel_dp_encoder_destroy,
4067 };
4068 
4069 void
4070 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4071 {
4072 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4073 
4074 	intel_dp_check_link_status(intel_dp);
4075 }
4076 
4077 bool
4078 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4079 {
4080 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4081 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4082 	struct drm_device *dev = intel_dig_port->base.base.dev;
4083 	struct drm_i915_private *dev_priv = dev->dev_private;
4084 	enum intel_display_power_domain power_domain;
4085 	bool ret = true;
4086 
4087 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4088 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4089 
4090 	DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
4091 		      long_hpd ? "long" : "short");
4092 
4093 	power_domain = intel_display_port_power_domain(intel_encoder);
4094 	intel_display_power_get(dev_priv, power_domain);
4095 
4096 	if (long_hpd) {
4097 		ret = true;
4098 		goto put_power;
4099 	}
4100 
4101 	/*
4102 	 * we'll check the link status via the normal hot plug path later -
4103 	 * but for short hpds we should check it now
4104 	 */
4105 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4106 	intel_dp_check_link_status(intel_dp);
4107 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
4108 	ret = false;
4109 
4110 put_power:
4111 	intel_display_power_put(dev_priv, power_domain);
4112 
4113 	return ret;
4114 }
4115 
4116 /* Return which DP Port should be selected for Transcoder DP control */
4117 int
4118 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4119 {
4120 	struct drm_device *dev = crtc->dev;
4121 	struct intel_encoder *intel_encoder;
4122 	struct intel_dp *intel_dp;
4123 
4124 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4125 		intel_dp = enc_to_intel_dp(&intel_encoder->base);
4126 
4127 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4128 		    intel_encoder->type == INTEL_OUTPUT_EDP)
4129 			return intel_dp->output_reg;
4130 	}
4131 
4132 	return -1;
4133 }
4134 
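/*
 * For illustration only: a sketch of the kind of caller this helper was
 * written for (the real one lives in the mode-setting code). Comparing
 * the returned output register against PCH_DP_B/C/D to pick the
 * TRANS_DP_PORT_SEL_* value is an assumption about that caller.
 */
#if 0
static u32 trans_dp_port_sel_usage_sketch(struct drm_crtc *crtc)
{
	switch (intel_trans_dp_port_sel(crtc)) {
	case PCH_DP_B:
		return TRANS_DP_PORT_SEL_B;
	case PCH_DP_C:
		return TRANS_DP_PORT_SEL_C;
	case PCH_DP_D:
		return TRANS_DP_PORT_SEL_D;
	default:
		return TRANS_DP_PORT_SEL_NONE;
	}
}
#endif
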
4135 /* check the VBT to see whether the eDP is on DP-D port */
4136 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4137 {
4138 	struct drm_i915_private *dev_priv = dev->dev_private;
4139 	union child_device_config *p_child;
4140 	int i;
4141 	static const short port_mapping[] = {
4142 		[PORT_B] = PORT_IDPB,
4143 		[PORT_C] = PORT_IDPC,
4144 		[PORT_D] = PORT_IDPD,
4145 	};
4146 
4147 	if (port == PORT_A)
4148 		return true;
4149 
4150 	if (!dev_priv->vbt.child_dev_num)
4151 		return false;
4152 
4153 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4154 		p_child = dev_priv->vbt.child_dev + i;
4155 
4156 		if (p_child->common.dvo_port == port_mapping[port] &&
4157 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4158 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4159 			return true;
4160 	}
4161 	return false;
4162 }
4163 
4164 void
4165 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4166 {
4167 	struct intel_connector *intel_connector = to_intel_connector(connector);
4168 
4169 	intel_attach_force_audio_property(connector);
4170 	intel_attach_broadcast_rgb_property(connector);
4171 	intel_dp->color_range_auto = true;
4172 
4173 	if (is_edp(intel_dp)) {
4174 		drm_mode_create_scaling_mode_property(connector->dev);
4175 		drm_object_attach_property(
4176 			&connector->base,
4177 			connector->dev->mode_config.scaling_mode_property,
4178 			DRM_MODE_SCALE_ASPECT);
4179 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4180 	}
4181 }
4182 
4183 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4184 {
4185 	intel_dp->last_power_cycle = jiffies;
4186 	intel_dp->last_power_on = jiffies;
4187 	intel_dp->last_backlight_off = jiffies;
4188 }
4189 
4190 static void
4191 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4192 				    struct intel_dp *intel_dp,
4193 				    struct edp_power_seq *out)
4194 {
4195 	struct drm_i915_private *dev_priv = dev->dev_private;
4196 	struct edp_power_seq cur, vbt, spec, final;
4197 	u32 pp_on, pp_off, pp_div, pp;
4198 	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4199 
4200 	if (HAS_PCH_SPLIT(dev)) {
4201 		pp_ctrl_reg = PCH_PP_CONTROL;
4202 		pp_on_reg = PCH_PP_ON_DELAYS;
4203 		pp_off_reg = PCH_PP_OFF_DELAYS;
4204 		pp_div_reg = PCH_PP_DIVISOR;
4205 	} else {
4206 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4207 
4208 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4209 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4210 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4211 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4212 	}
4213 
4214 	/* Workaround: Need to write PP_CONTROL with the unlock key as
4215 	 * the very first thing. */
4216 	pp = ironlake_get_pp_control(intel_dp);
4217 	I915_WRITE(pp_ctrl_reg, pp);
4218 
4219 	pp_on = I915_READ(pp_on_reg);
4220 	pp_off = I915_READ(pp_off_reg);
4221 	pp_div = I915_READ(pp_div_reg);
4222 
4223 	/* Pull timing values out of registers */
4224 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4225 		PANEL_POWER_UP_DELAY_SHIFT;
4226 
4227 	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4228 		PANEL_LIGHT_ON_DELAY_SHIFT;
4229 
4230 	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4231 		PANEL_LIGHT_OFF_DELAY_SHIFT;
4232 
4233 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4234 		PANEL_POWER_DOWN_DELAY_SHIFT;
4235 
4236 	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4237 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4238 
4239 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4240 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4241 
4242 	vbt = dev_priv->vbt.edp_pps;
4243 
4244 	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4245 	 * our hw here, which are all in 100usec. */
4246 	spec.t1_t3 = 210 * 10;
4247 	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4248 	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4249 	spec.t10 = 500 * 10;
4250 	/* This one is special and actually in units of 100ms, but zero
4251 	 * based in the hw (so we need to add 100 ms). But the sw vbt
4252 	 * table multiplies it by 1000 to make it in units of 100usec,
4253 	 * too. */
4254 	spec.t11_t12 = (510 + 100) * 10;
4255 
4256 	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4257 		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4258 
4259 	/* Use the max of the register settings and vbt. If both are
4260 	 * unset, fall back to the spec limits. */
4261 #define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
4262 				       spec.field : \
4263 				       max(cur.field, vbt.field))
4264 	assign_final(t1_t3);
4265 	assign_final(t8);
4266 	assign_final(t9);
4267 	assign_final(t10);
4268 	assign_final(t11_t12);
4269 #undef assign_final
4270 
4271 #define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
4272 	intel_dp->panel_power_up_delay = get_delay(t1_t3);
4273 	intel_dp->backlight_on_delay = get_delay(t8);
4274 	intel_dp->backlight_off_delay = get_delay(t9);
4275 	intel_dp->panel_power_down_delay = get_delay(t10);
4276 	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4277 #undef get_delay
4278 
4279 	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4280 		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4281 		      intel_dp->panel_power_cycle_delay);
4282 
4283 	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4284 		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
4285 
4286 	if (out)
4287 		*out = final;
4288 }
4289 
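/*
 * Worked example for the delay math above (values assumed for
 * illustration): a t1_t3 of 2100 hw units is 2100 * 100usec = 210ms of
 * panel power-up delay, which get_delay() turns back into
 * DIV_ROUND_UP(2100, 10) = 210ms. The spec fallback t11_t12 of
 * (510 + 100) * 10 = 6100 units likewise becomes a 610ms cycle delay.
 */
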
4290 static void
4291 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4292 					      struct intel_dp *intel_dp,
4293 					      struct edp_power_seq *seq)
4294 {
4295 	struct drm_i915_private *dev_priv = dev->dev_private;
4296 	u32 pp_on, pp_off, pp_div, port_sel = 0;
4297 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4298 	int pp_on_reg, pp_off_reg, pp_div_reg;
4299 
4300 	if (HAS_PCH_SPLIT(dev)) {
4301 		pp_on_reg = PCH_PP_ON_DELAYS;
4302 		pp_off_reg = PCH_PP_OFF_DELAYS;
4303 		pp_div_reg = PCH_PP_DIVISOR;
4304 	} else {
4305 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4306 
4307 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4308 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4309 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4310 	}
4311 
4312 	/*
4313 	 * And finally store the new values in the power sequencer. The
4314 	 * backlight delays are set to 1 because we do manual waits on them. For
4315 	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4316 	 * we'll end up waiting for the backlight off delay twice: once when we
4317 	 * do the manual sleep, and once when we disable the panel and wait for
4318 	 * the PP_STATUS bit to become zero.
4319 	 */
4320 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
4321 		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4322 	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
4323 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
4324 	/* Compute the divisor for the pp clock, simply match the Bspec
4325 	 * formula. */
4326 	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
4327 	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
4328 			<< PANEL_POWER_CYCLE_DELAY_SHIFT);
4329 
4330 	/* Haswell doesn't have any port selection bits for the panel
4331 	 * power sequencer any more. */
4332 	if (IS_VALLEYVIEW(dev)) {
4333 		if (dp_to_dig_port(intel_dp)->port == PORT_B)
4334 			port_sel = PANEL_PORT_SELECT_DPB_VLV;
4335 		else
4336 			port_sel = PANEL_PORT_SELECT_DPC_VLV;
4337 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
4338 		if (dp_to_dig_port(intel_dp)->port == PORT_A)
4339 			port_sel = PANEL_PORT_SELECT_DPA;
4340 		else
4341 			port_sel = PANEL_PORT_SELECT_DPD;
4342 	}
4343 
4344 	pp_on |= port_sel;
4345 
4346 	I915_WRITE(pp_on_reg, pp_on);
4347 	I915_WRITE(pp_off_reg, pp_off);
4348 	I915_WRITE(pp_div_reg, pp_div);
4349 
4350 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
4351 		      I915_READ(pp_on_reg),
4352 		      I915_READ(pp_off_reg),
4353 		      I915_READ(pp_div_reg));
4354 }
4355 
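/*
 * Worked example for the divisor math above, with a hypothetical rawclk
 * of 125 (the value itself is an assumption): the reference divider
 * becomes (100 * 125) / 2 - 1 = 6249, and a t11_t12 of 6100 units
 * contributes DIV_ROUND_UP(6100, 1000) = 7 power cycle delay steps.
 */
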
4356 void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4357 {
4358 	struct drm_i915_private *dev_priv = dev->dev_private;
4359 	struct intel_encoder *encoder;
4360 	struct intel_dp *intel_dp = NULL;
4361 	struct intel_crtc_config *config = NULL;
4362 	struct intel_crtc *intel_crtc = NULL;
4363 	struct intel_connector *intel_connector = dev_priv->drrs.connector;
4364 	u32 reg, val;
4365 	enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
4366 
4367 	if (refresh_rate <= 0) {
4368 		DRM_DEBUG_KMS("Refresh rate must be positive.\n");
4369 		return;
4370 	}
4371 
4372 	if (intel_connector == NULL) {
4373 		DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
4374 		return;
4375 	}
4376 
4377 	/*
4378 	 * FIXME: This needs proper synchronization with the PSR state, but it
4379 	 * is hard to tell without seeing the users of this code.
4380 	 * Check locking and ordering once that lands.
4381 	 */
4382 	if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
4383 		DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
4384 		return;
4385 	}
4386 
4387 	encoder = intel_attached_encoder(&intel_connector->base);
4388 	intel_dp = enc_to_intel_dp(&encoder->base);
4389 	intel_crtc = encoder->new_crtc;
4390 
4391 	if (!intel_crtc) {
4392 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4393 		return;
4394 	}
4395 
4396 	config = &intel_crtc->config;
4397 
4398 	if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
4399 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4400 		return;
4401 	}
4402 
4403 	if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
4404 		index = DRRS_LOW_RR;
4405 
4406 	if (index == intel_dp->drrs_state.refresh_rate_type) {
4407 		DRM_DEBUG_KMS(
4408 			"DRRS requested for previously set RR...ignoring\n");
4409 		return;
4410 	}
4411 
4412 	if (!intel_crtc->active) {
4413 		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not active\n");
4414 		return;
4415 	}
4416 
4417 	if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
4418 		reg = PIPECONF(intel_crtc->config.cpu_transcoder);
4419 		val = I915_READ(reg);
4420 		if (index > DRRS_HIGH_RR) {
4421 			val |= PIPECONF_EDP_RR_MODE_SWITCH;
4422 			intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
4423 		} else {
4424 			val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4425 		}
4426 		I915_WRITE(reg, val);
4427 	}
4428 
4429 	/*
4430 	 * The mutex is taken to ensure that there is no race between different
4431 	 * DRRS calls trying to update the refresh rate. This scenario may
4432 	 * occur in the future, once idleness-detection-based DRRS lands in the
4433 	 * kernel and user space can request a different RR.
4434 	 */
4435 
4436 	mutex_lock(&intel_dp->drrs_state.mutex);
4437 
4438 	intel_dp->drrs_state.refresh_rate_type = index;
4439 
4440 	mutex_unlock(&intel_dp->drrs_state.mutex);
4441 
4442 	DRM_DEBUG_KMS("eDP refresh rate set to %d Hz\n", refresh_rate);
4443 }
4444 
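/*
 * For illustration only: how an (assumed) idleness handler might drive
 * intel_dp_set_drrs_state(), downclocking when idle and restoring the
 * fixed-mode rate on activity. No such caller exists in this file.
 */
#if 0
static void drrs_idle_sketch(struct drm_device *dev,
			     struct intel_connector *intel_connector,
			     bool idle)
{
	struct drm_display_mode *mode = idle ?
		intel_connector->panel.downclock_mode :
		intel_connector->panel.fixed_mode;

	intel_dp_set_drrs_state(dev, mode->vrefresh);
}
#endif
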
4445 static struct drm_display_mode *
4446 intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4447 			struct intel_connector *intel_connector,
4448 			struct drm_display_mode *fixed_mode)
4449 {
4450 	struct drm_connector *connector = &intel_connector->base;
4451 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4452 	struct drm_device *dev = intel_dig_port->base.base.dev;
4453 	struct drm_i915_private *dev_priv = dev->dev_private;
4454 	struct drm_display_mode *downclock_mode = NULL;
4455 
4456 	if (INTEL_INFO(dev)->gen <= 6) {
4457 		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
4458 		return NULL;
4459 	}
4460 
4461 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4462 		DRM_INFO("VBT doesn't support DRRS\n");
4463 		return NULL;
4464 	}
4465 
4466 	downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
4467 						    connector);
4468 
4469 	if (!downclock_mode) {
4470 		DRM_INFO("DRRS not supported\n");
4471 		return NULL;
4472 	}
4473 
4474 	dev_priv->drrs.connector = intel_connector;
4475 
4476 	lockinit(&intel_dp->drrs_state.mutex, "i915dsm", 0, LK_CANRECURSE);
4477 
4478 	intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
4479 
4480 	intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
4481 	DRM_INFO("seamless DRRS supported for eDP panel.\n");
4482 	return downclock_mode;
4483 }
4484 
4485 void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
4486 {
4487 	struct drm_device *dev = intel_encoder->base.dev;
4488 	struct drm_i915_private *dev_priv = dev->dev_private;
4489 	struct intel_dp *intel_dp;
4490 	enum intel_display_power_domain power_domain;
4491 
4492 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4493 		return;
4494 
4495 	intel_dp = enc_to_intel_dp(&intel_encoder->base);
4496 	if (!edp_have_panel_vdd(intel_dp))
4497 		return;
4498 	/*
4499 	 * The VDD bit needs a power domain reference, so if the bit is
4500 	 * already enabled when we boot or resume, grab this reference and
4501 	 * schedule a vdd off, so we don't hold on to the reference
4502 	 * indefinitely.
4503 	 */
4504 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4505 	power_domain = intel_display_port_power_domain(intel_encoder);
4506 	intel_display_power_get(dev_priv, power_domain);
4507 
4508 	edp_panel_vdd_schedule_off(intel_dp);
4509 }
4510 
4511 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4512 				     struct intel_connector *intel_connector,
4513 				     struct edp_power_seq *power_seq)
4514 {
4515 	struct drm_connector *connector = &intel_connector->base;
4516 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4517 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4518 	struct drm_device *dev = intel_encoder->base.dev;
4519 	struct drm_i915_private *dev_priv = dev->dev_private;
4520 	struct drm_display_mode *fixed_mode = NULL;
4521 	struct drm_display_mode *downclock_mode = NULL;
4522 	bool has_dpcd;
4523 	struct drm_display_mode *scan;
4524 	struct edid *edid;
4525 
4526 	intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
4527 
4528 	if (!is_edp(intel_dp))
4529 		return true;
4530 
4531 	intel_edp_panel_vdd_sanitize(intel_encoder);
4532 
4533 	/* Cache DPCD and EDID for edp. */
4534 	intel_edp_panel_vdd_on(intel_dp);
4535 	has_dpcd = intel_dp_get_dpcd(intel_dp);
4536 	edp_panel_vdd_off(intel_dp, false);
4537 
4538 	if (has_dpcd) {
4539 		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4540 			dev_priv->no_aux_handshake =
4541 				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4542 				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4543 	} else {
4544 		/* if this fails, presume the device is a ghost */
4545 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
4546 		return false;
4547 	}
4548 
4549 	/* We now know it's not a ghost, init power sequence regs. */
4550 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
4551 
4552 	mutex_lock(&dev->mode_config.mutex);
4553 	edid = drm_get_edid(connector, intel_dp->adapter);
4554 	if (edid) {
4555 		if (drm_add_edid_modes(connector, edid)) {
4556 			drm_mode_connector_update_edid_property(connector,
4557 								edid);
4558 			drm_edid_to_eld(connector, edid);
4559 		} else {
4560 			kfree(edid);
4561 			edid = ERR_PTR(-EINVAL);
4562 		}
4563 	} else {
4564 		edid = ERR_PTR(-ENOENT);
4565 	}
4566 	intel_connector->edid = edid;
4567 
4568 	/* prefer fixed mode from EDID if available */
4569 	list_for_each_entry(scan, &connector->probed_modes, head) {
4570 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
4571 			fixed_mode = drm_mode_duplicate(dev, scan);
4572 			downclock_mode = intel_dp_drrs_init(
4573 						intel_dig_port,
4574 						intel_connector, fixed_mode);
4575 			break;
4576 		}
4577 	}
4578 
4579 	/* fallback to VBT if available for eDP */
4580 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
4581 		fixed_mode = drm_mode_duplicate(dev,
4582 					dev_priv->vbt.lfp_lvds_vbt_mode);
4583 		if (fixed_mode)
4584 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
4585 	}
4586 	mutex_unlock(&dev->mode_config.mutex);
4587 
4588 #if 0
4589 	if (IS_VALLEYVIEW(dev)) {
4590 		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
4591 		register_reboot_notifier(&intel_dp->edp_notifier);
4592 	}
4593 #endif
4594 
4595 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
4596 	intel_panel_setup_backlight(connector);
4597 
4598 	return true;
4599 }
4600 
4601 bool
4602 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4603 			struct intel_connector *intel_connector)
4604 {
4605 	struct drm_connector *connector = &intel_connector->base;
4606 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4607 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4608 	struct drm_device *dev = intel_encoder->base.dev;
4609 	struct drm_i915_private *dev_priv = dev->dev_private;
4610 	enum port port = intel_dig_port->port;
4611 	struct edp_power_seq power_seq = { 0 };
4612 	const char *name = NULL;
4613 	int type, error;
4614 
4615 	/* intel_dp vfuncs */
4616 	if (IS_VALLEYVIEW(dev))
4617 		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
4618 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4619 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
4620 	else if (HAS_PCH_SPLIT(dev))
4621 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
4622 	else
4623 		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
4624 
4625 	intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
4626 
4627 	/* Preserve the current hw state. */
4628 	intel_dp->DP = I915_READ(intel_dp->output_reg);
4629 	intel_dp->attached_connector = intel_connector;
4630 
4631 	if (intel_dp_is_edp(dev, port))
4632 		type = DRM_MODE_CONNECTOR_eDP;
4633 	else
4634 		type = DRM_MODE_CONNECTOR_DisplayPort;
4635 
4636 	/*
4637 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
4638 	 * for DP the encoder type can be set by the caller to
4639 	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
4640 	 */
4641 	if (type == DRM_MODE_CONNECTOR_eDP)
4642 		intel_encoder->type = INTEL_OUTPUT_EDP;
4643 
4644 	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
4645 			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
4646 			port_name(port));
4647 
4648 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
4649 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
4650 
4651 	connector->interlace_allowed = true;
4652 	connector->doublescan_allowed = 0;
4653 
4654 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4655 			  edp_panel_vdd_work);
4656 
4657 	intel_connector_attach_encoder(intel_connector, intel_encoder);
4658 	drm_connector_register(connector);
4659 
4660 	if (HAS_DDI(dev))
4661 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
4662 	else
4663 		intel_connector->get_hw_state = intel_connector_get_hw_state;
4664 	intel_connector->unregister = intel_dp_connector_unregister;
4665 
4666 	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
4667 	if (HAS_DDI(dev)) {
4668 		switch (intel_dig_port->port) {
4669 		case PORT_A:
4670 			intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
4671 			break;
4672 		case PORT_B:
4673 			intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
4674 			break;
4675 		case PORT_C:
4676 			intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
4677 			break;
4678 		case PORT_D:
4679 			intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
4680 			break;
4681 		default:
4682 			BUG();
4683 		}
4684 	}
4685 
4686 	/* Set up the DDC bus. */
4687 	switch (port) {
4688 	case PORT_A:
4689 		intel_encoder->hpd_pin = HPD_PORT_A;
4690 		name = "DPDDC-A";
4691 		break;
4692 	case PORT_B:
4693 		intel_encoder->hpd_pin = HPD_PORT_B;
4694 		name = "DPDDC-B";
4695 		break;
4696 	case PORT_C:
4697 		intel_encoder->hpd_pin = HPD_PORT_C;
4698 		name = "DPDDC-C";
4699 		break;
4700 	case PORT_D:
4701 		intel_encoder->hpd_pin = HPD_PORT_D;
4702 		name = "DPDDC-D";
4703 		break;
4704 	default:
4705 		BUG();
4706 	}
4707 
4708 	if (is_edp(intel_dp)) {
4709 		intel_dp_init_panel_power_timestamps(intel_dp);
4710 		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
4711 	}
4712 
4713 	error = intel_dp_i2c_init(intel_dp, intel_connector, name);
4714 	WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
4715 	     error, port_name(port));
4716 
4717 	if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
4718 #if 0
4719 		i2c_del_adapter(&intel_dp->adapter);
4720 #endif
4721 		if (is_edp(intel_dp)) {
4722 			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4723 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4724 			edp_panel_vdd_off_sync(intel_dp);
4725 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
4726 		}
4727 		drm_connector_unregister(connector);
4728 		drm_connector_cleanup(connector);
4729 		return false;
4730 	}
4731 
4732 	intel_dp_add_properties(intel_dp, connector);
4733 
4734 	/* For the G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
4735 	 * with 0xd.  Failure to do so will result in spurious interrupts being
4736 	 * generated on the port when a cable is not attached.
4737 	 */
4738 	if (IS_G4X(dev) && !IS_GM45(dev)) {
4739 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
4740 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
4741 	}
4742 
4743 	return true;
4744 }
4745 
4746 void
4747 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4748 {
4749 	struct drm_i915_private *dev_priv = dev->dev_private;
4750 	struct intel_digital_port *intel_dig_port;
4751 	struct intel_encoder *intel_encoder;
4752 	struct drm_encoder *encoder;
4753 	struct intel_connector *intel_connector;
4754 
4755 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
4756 	if (!intel_dig_port)
4757 		return;
4758 
4759 	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
4760 	if (!intel_connector) {
4761 		kfree(intel_dig_port);
4762 		return;
4763 	}
4764 
4765 	intel_encoder = &intel_dig_port->base;
4766 	encoder = &intel_encoder->base;
4767 
4768 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
4769 			 DRM_MODE_ENCODER_TMDS);
4770 
4771 	intel_encoder->compute_config = intel_dp_compute_config;
4772 	intel_encoder->disable = intel_disable_dp;
4773 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
4774 	intel_encoder->get_config = intel_dp_get_config;
4775 	intel_encoder->suspend = intel_dp_encoder_suspend;
4776 	if (IS_CHERRYVIEW(dev)) {
4777 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
4778 		intel_encoder->pre_enable = chv_pre_enable_dp;
4779 		intel_encoder->enable = vlv_enable_dp;
4780 		intel_encoder->post_disable = chv_post_disable_dp;
4781 	} else if (IS_VALLEYVIEW(dev)) {
4782 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
4783 		intel_encoder->pre_enable = vlv_pre_enable_dp;
4784 		intel_encoder->enable = vlv_enable_dp;
4785 		intel_encoder->post_disable = vlv_post_disable_dp;
4786 	} else {
4787 		intel_encoder->pre_enable = g4x_pre_enable_dp;
4788 		intel_encoder->enable = g4x_enable_dp;
4789 		intel_encoder->post_disable = g4x_post_disable_dp;
4790 	}
4791 
4792 	intel_dig_port->port = port;
4793 	intel_dig_port->dp.output_reg = output_reg;
4794 
4795 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4796 	if (IS_CHERRYVIEW(dev)) {
4797 		if (port == PORT_D)
4798 			intel_encoder->crtc_mask = 1 << 2;
4799 		else
4800 			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
4801 	} else {
4802 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
4803 	}
4804 	intel_encoder->cloneable = 0;
4805 	intel_encoder->hot_plug = intel_dp_hot_plug;
4806 
4807 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
4808 	dev_priv->hpd_irq_port[port] = intel_dig_port;
4809 
4810 	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
4811 		drm_encoder_cleanup(encoder);
4812 		kfree(intel_dig_port);
4813 		kfree(intel_connector);
4814 	}
4815 }
4816
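/*
 * For illustration only: a sketch of how the display setup path is
 * expected to call intel_dp_init() for each strapped port. The DP_B/C/D
 * DP_DETECTED checks mirror what the mode-setting init code does;
 * treating this as the exact call site is an assumption.
 */
#if 0
static void dp_init_ports_sketch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(DP_B) & DP_DETECTED)
		intel_dp_init(dev, DP_B, PORT_B);
	if (I915_READ(DP_C) & DP_DETECTED)
		intel_dp_init(dev, DP_C, PORT_C);
	if (I915_READ(DP_D) & DP_DETECTED)
		intel_dp_init(dev, DP_D, PORT_D);
}
#endif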