xref: /dragonfly/sys/dev/drm/i915/intel_dp.c (revision bb66151c)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27 
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <drm/drmP.h>
31 #include <drm/drm_crtc.h>
32 #include <drm/drm_crtc_helper.h>
33 #include <drm/drm_edid.h>
34 #include "intel_drv.h"
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 
38 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
39 
40 /**
41  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
42  * @intel_dp: DP struct
43  *
44  * If a CPU or PCH DP output is attached to an eDP panel, this function
45  * will return true, and false otherwise.
46  */
47 static bool is_edp(struct intel_dp *intel_dp)
48 {
49 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
50 
51 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
52 }
53 
/**
 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a PCH DP port attached
 * to an eDP panel, false otherwise.  Helpful for determining whether we
 * may need FDI resources for a given DP output or not.
 */
static bool is_pch_edp(struct intel_dp *intel_dp)
{
	/* Precomputed flag — presumably set during output init; confirm at the
	 * port setup path rather than re-deriving it here. */
	return intel_dp->is_pch_edp;
}
66 
67 /**
68  * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
69  * @intel_dp: DP struct
70  *
71  * Returns true if the given DP struct corresponds to a CPU eDP port.
72  */
73 static bool is_cpu_edp(struct intel_dp *intel_dp)
74 {
75 	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
76 }
77 
78 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
79 {
80 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
81 
82 	return intel_dig_port->base.base.dev;
83 }
84 
85 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
86 {
87 	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
88 }
89 
90 /**
91  * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
92  * @encoder: DRM encoder
93  *
94  * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
95  * by intel_display.c.
96  */
97 bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
98 {
99 	struct intel_dp *intel_dp;
100 
101 	if (!encoder)
102 		return false;
103 
104 	intel_dp = enc_to_intel_dp(encoder);
105 
106 	return is_pch_edp(intel_dp);
107 }
108 
109 static void intel_dp_link_down(struct intel_dp *intel_dp);
110 
111 void
112 intel_edp_link_config(struct intel_encoder *intel_encoder,
113 		       int *lane_num, int *link_bw)
114 {
115 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
116 
117 	*lane_num = intel_dp->lane_count;
118 	*link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
119 }
120 
121 int
122 intel_edp_target_clock(struct intel_encoder *intel_encoder,
123 		       struct drm_display_mode *mode)
124 {
125 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
126 	struct intel_connector *intel_connector = intel_dp->attached_connector;
127 
128 	if (intel_connector->panel.fixed_mode)
129 		return intel_connector->panel.fixed_mode->clock;
130 	else
131 		return mode->clock;
132 }
133 
134 static int
135 intel_dp_max_link_bw(struct intel_dp *intel_dp)
136 {
137 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
138 
139 	switch (max_link_bw) {
140 	case DP_LINK_BW_1_62:
141 	case DP_LINK_BW_2_7:
142 		break;
143 	default:
144 		max_link_bw = DP_LINK_BW_1_62;
145 		break;
146 	}
147 	return max_link_bw;
148 }
149 
150 /*
151  * The units on the numbers in the next two are... bizarre.  Examples will
152  * make it clearer; this one parallels an example in the eDP spec.
153  *
154  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
155  *
156  *     270000 * 1 * 8 / 10 == 216000
157  *
158  * The actual data capacity of that configuration is 2.16Gbit/s, so the
159  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
160  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
161  * 119000.  At 18bpp that's 2142000 kilobits per second.
162  *
163  * Thus the strange-looking division by 10 in intel_dp_link_required, to
164  * get the result in decakilobits instead of kilobits.
165  */
166 
/*
 * Bandwidth needed by a mode, in decakilobits/s: pixel_clock (kHz) times
 * bits per pixel gives kilobits/s; dividing by 10 (rounding up) converts
 * to the decakilobit units used by intel_dp_max_data_rate above.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	/* +9 rounds up when converting kilobits to decakilobits. */
	return (kilobits + 9) / 10;
}
172 
/*
 * Usable payload capacity of a link, in decakilobits/s.  The link clock
 * is in kHz per lane; 8b/10b encoding means only 8 of every 10 link
 * bits carry data, hence the * 8 / 10.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_rate = max_link_clock * max_lanes;

	return link_rate * 8 / 10;
}
178 
179 static bool
180 intel_dp_adjust_dithering(struct intel_dp *intel_dp,
181 			  struct drm_display_mode *mode,
182 			  bool adjust_mode)
183 {
184 	int max_link_clock =
185 		drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
186 	int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
187 	int max_rate, mode_rate;
188 
189 	mode_rate = intel_dp_link_required(mode->clock, 24);
190 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
191 
192 	if (mode_rate > max_rate) {
193 		mode_rate = intel_dp_link_required(mode->clock, 18);
194 		if (mode_rate > max_rate)
195 			return false;
196 
197 		if (adjust_mode)
198 			mode->private_flags
199 				|= INTEL_MODE_DP_FORCE_6BPC;
200 
201 		return true;
202 	}
203 
204 	return true;
205 }
206 
207 static int
208 intel_dp_mode_valid(struct drm_connector *connector,
209 		    struct drm_display_mode *mode)
210 {
211 	struct intel_dp *intel_dp = intel_attached_dp(connector);
212 	struct intel_connector *intel_connector = to_intel_connector(connector);
213 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
214 
215 	if (is_edp(intel_dp) && fixed_mode) {
216 		if (mode->hdisplay > fixed_mode->hdisplay)
217 			return MODE_PANEL;
218 
219 		if (mode->vdisplay > fixed_mode->vdisplay)
220 			return MODE_PANEL;
221 	}
222 
223 	if (!intel_dp_adjust_dithering(intel_dp, mode, false))
224 		return MODE_CLOCK_HIGH;
225 
226 	if (mode->clock < 10000)
227 		return MODE_CLOCK_LOW;
228 
229 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
230 		return MODE_H_ILLEGAL;
231 
232 	return MODE_OK;
233 }
234 
235 static uint32_t
236 pack_aux(uint8_t *src, int src_bytes)
237 {
238 	int	i;
239 	uint32_t v = 0;
240 
241 	if (src_bytes > 4)
242 		src_bytes = 4;
243 	for (i = 0; i < src_bytes; i++)
244 		v |= ((uint32_t) src[i]) << ((3-i) * 8);
245 	return v;
246 }
247 
248 static void
249 unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
250 {
251 	int i;
252 	if (dst_bytes > 4)
253 		dst_bytes = 4;
254 	for (i = 0; i < dst_bytes; i++)
255 		dst[i] = src >> ((3-i) * 8);
256 }
257 
258 /* hrawclock is 1/4 the FSB frequency */
259 static int
260 intel_hrawclk(struct drm_device *dev)
261 {
262 	struct drm_i915_private *dev_priv = dev->dev_private;
263 	uint32_t clkcfg;
264 
265 	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
266 	if (IS_VALLEYVIEW(dev))
267 		return 200;
268 
269 	clkcfg = I915_READ(CLKCFG);
270 	switch (clkcfg & CLKCFG_FSB_MASK) {
271 	case CLKCFG_FSB_400:
272 		return 100;
273 	case CLKCFG_FSB_533:
274 		return 133;
275 	case CLKCFG_FSB_667:
276 		return 166;
277 	case CLKCFG_FSB_800:
278 		return 200;
279 	case CLKCFG_FSB_1067:
280 		return 266;
281 	case CLKCFG_FSB_1333:
282 		return 333;
283 	/* these two are just a guess; one of them might be right */
284 	case CLKCFG_FSB_1600:
285 	case CLKCFG_FSB_1600_ALT:
286 		return 400;
287 	default:
288 		return 133;
289 	}
290 }
291 
292 static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
293 {
294 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
295 	struct drm_i915_private *dev_priv = dev->dev_private;
296 
297 	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
298 }
299 
300 static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
301 {
302 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
303 	struct drm_i915_private *dev_priv = dev->dev_private;
304 
305 	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
306 }
307 
308 static void
309 intel_dp_check_edp(struct intel_dp *intel_dp)
310 {
311 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
312 	struct drm_i915_private *dev_priv = dev->dev_private;
313 
314 	if (!is_edp(intel_dp))
315 		return;
316 	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
317 		WARN(1, "eDP powered off while attempting aux channel communication.\n");
318 		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
319 			      I915_READ(PCH_PP_STATUS),
320 			      I915_READ(PCH_PP_CONTROL));
321 	}
322 }
323 
/*
 * Wait for an in-flight AUX transaction to complete and return the final
 * AUX channel control register value (status bits included).
 *
 * When an AUX-done interrupt is available we sleep on the gmbus wait
 * queue with a 10ms timeout; otherwise we atomically poll the SEND_BUSY
 * bit for up to 10ms.  A timeout only logs an error — the latched
 * register value is returned either way for the caller to inspect.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Pre-HSW: AUX control sits at a fixed offset from the port reg. */
	uint32_t ch_ctl = intel_dp->output_reg + 0x10;
	uint32_t status;
	bool done;

	/* Haswell moved the AUX registers; select them per port instead. */
	if (IS_HASWELL(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			ch_ctl = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			ch_ctl = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			ch_ctl = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			ch_ctl = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

	/* C evaluates true once SEND_BUSY clears; as a side effect it also
	 * latches the current register value into 'status'. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
366 
/*
 * Perform one raw AUX channel transaction: load @send into the AUX data
 * registers, kick off the transfer, and unload any reply into @recv.
 *
 * Returns the number of reply bytes received, or a negative errno:
 * -EBUSY if the channel never went idle or the transfer never completed,
 * -EIO on a receive error, -ETIMEDOUT when the sink did not respond
 * (expected for a disconnected port, hence only a debug message).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	uint32_t output_reg = intel_dp->output_reg;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Pre-HSW layout: control at +0x10, data registers following it. */
	uint32_t ch_ctl = output_reg + 0x10;
	uint32_t ch_data = ch_ctl + 4;
	int i, ret, recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;
	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	/* Haswell moved the AUX registers; select them per port instead of
	 * relying on the fixed offset from output_reg. */
	if (IS_HASWELL(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			ch_ctl = DPA_AUX_CH_CTL;
			ch_data = DPA_AUX_CH_DATA1;
			break;
		case PORT_B:
			ch_ctl = PCH_DPB_AUX_CH_CTL;
			ch_data = PCH_DPB_AUX_CH_DATA1;
			break;
		case PORT_C:
			ch_ctl = PCH_DPC_AUX_CH_CTL;
			ch_data = PCH_DPC_AUX_CH_DATA1;
			break;
		case PORT_D:
			ch_ctl = PCH_DPD_AUX_CH_CTL;
			ch_data = PCH_DPD_AUX_CH_DATA1;
			break;
		default:
			BUG();
		}
	}

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (HAS_DDI(dev))
			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
		else if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		/* Timeout/receive errors are retried; DONE means success. */
		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Drop the latency constraint requested at function entry. */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	return ret;
}
529 
530 /* Write data to the aux channel in native mode */
531 static int
532 intel_dp_aux_native_write(struct intel_dp *intel_dp,
533 			  uint16_t address, uint8_t *send, int send_bytes)
534 {
535 	int ret;
536 	uint8_t	msg[20];
537 	int msg_bytes;
538 	uint8_t	ack;
539 
540 	intel_dp_check_edp(intel_dp);
541 	if (send_bytes > 16)
542 		return -1;
543 	msg[0] = AUX_NATIVE_WRITE << 4;
544 	msg[1] = address >> 8;
545 	msg[2] = address & 0xff;
546 	msg[3] = send_bytes - 1;
547 	memcpy(&msg[4], send, send_bytes);
548 	msg_bytes = send_bytes + 4;
549 	for (;;) {
550 		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
551 		if (ret < 0)
552 			return ret;
553 		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
554 			break;
555 		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
556 			udelay(100);
557 		else
558 			return -EIO;
559 	}
560 	return send_bytes;
561 }
562 
563 /* Write a single byte to the aux channel in native mode */
564 static int
565 intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
566 			    uint16_t address, uint8_t byte)
567 {
568 	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
569 }
570 
571 /* read bytes from a native aux channel */
572 static int
573 intel_dp_aux_native_read(struct intel_dp *intel_dp,
574 			 uint16_t address, uint8_t *recv, int recv_bytes)
575 {
576 	uint8_t msg[4];
577 	int msg_bytes;
578 	uint8_t reply[20];
579 	int reply_bytes;
580 	uint8_t ack;
581 	int ret;
582 
583 	intel_dp_check_edp(intel_dp);
584 	msg[0] = AUX_NATIVE_READ << 4;
585 	msg[1] = address >> 8;
586 	msg[2] = address & 0xff;
587 	msg[3] = recv_bytes - 1;
588 
589 	msg_bytes = 4;
590 	reply_bytes = recv_bytes + 1;
591 
592 	for (;;) {
593 		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
594 				      reply, reply_bytes);
595 		if (ret == 0)
596 			return -EPROTO;
597 		if (ret < 0)
598 			return ret;
599 		ack = reply[0];
600 		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
601 			memcpy(recv, reply + 1, ret - 1);
602 			return ret - 1;
603 		}
604 		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
605 			udelay(100);
606 		else
607 			return -EIO;
608 	}
609 }
610 
/*
 * One step of an I2C-over-AUX transfer (DragonFly iic bridge callback):
 * build the I2C-over-AUX message for @mode, send it, and decode both
 * the native-AUX reply and the nested I2C reply.
 *
 * Each attempt is retried up to 5 times on DEFER (native or I2C) with
 * a 100us pause; NACKs and malformed replies return -EREMOTEIO.
 * On a successful read the received byte is stored via @read_byte.
 * Returns 0 on success.
 */
static int
intel_dp_i2c_aux_ch(struct device *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct iic_dp_aux_data *data = device_get_softc(adapter);
	struct intel_dp *intel_dp = data->priv;
	uint16_t address = data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* MOT (middle-of-transaction) stays set until the I2C stop. */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	/* Message length depends on the phase: data byte for writes,
	 * length field only for reads, bare address otherwise. */
	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		/* First decode the native AUX reply field... */
		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		/* ...then the nested I2C reply field. */
		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return (0/*reply_bytes - 1*/);
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}
705 
/*
 * Register the I2C-over-AUX adapter for this DP port with the iic
 * framework.  Panel VDD is forced on around the registration because
 * bus setup may issue AUX transactions (EDID probing on eDP).
 * Returns the iic_dp_aux_add_bus() result.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int	ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
	    &intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}
721 
/*
 * Encoder mode_fixup hook: pick the cheapest link configuration (lowest
 * bandwidth code, then fewest lanes) that carries @adjusted_mode, and
 * store the choice in intel_dp->link_bw / ->lane_count.  On success
 * adjusted_mode->clock is overwritten with the selected link clock.
 * Returns false if no configuration fits or the mode is double-clocked.
 */
bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
		    const struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	/* Index into bws[]: 1 allows both rates, 0 only 1.62GHz. */
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	/* eDP: replace the requested mode with the panel's fixed mode and
	 * set up the panel fitter accordingly. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		intel_pch_panel_fitting(dev,
					intel_connector->panel.fitting_mode,
					mode, adjusted_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	/* May set INTEL_MODE_DP_FORCE_6BPC on the mode as a side effect. */
	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;

	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;

	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

	/* Walk configurations cheapest-first: rate outer, lanes (1,2,4)
	 * inner; take the first one with enough capacity. */
	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_bw_clock =
				drm_dp_bw_code_to_link_rate(bws[clock]);
			int link_avail = intel_dp_max_data_rate(link_bw_clock,
								lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = link_bw_clock;
				DRM_DEBUG_KMS("DP link bw %02x lane "
						"count %d clock %d bpp %d\n",
				       intel_dp->link_bw, intel_dp->lane_count,
				       adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}

	return false;
}
797 
/*
 * Compute and program the data (GMCH) and link M/N ratios for a DP crtc.
 * The lane count is taken from the DP/eDP encoder on the crtc (default 4
 * if none matches); the target clock from the eDP panel's fixed mode when
 * applicable.  The M/N values land in per-platform registers: transcoder
 * registers on Haswell, TRANS* on PCH-split, PIPE_* on VLV/gen4-.
 */
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_link_m_n m_n;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
	int target_clock;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/* eDP panels run at their fixed mode's clock, not the requested one. */
	target_clock = mode->clock;
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			target_clock = intel_edp_target_clock(intel_encoder,
							      mode);
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_link_compute_m_n(intel_crtc->bpp, lane_count,
			       target_clock, adjusted_mode->clock, &m_n);

	if (IS_HASWELL(dev)) {
		I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
	} else if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}
868 
869 void intel_dp_init_link_config(struct intel_dp *intel_dp)
870 {
871 	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
872 	intel_dp->link_configuration[0] = intel_dp->link_bw;
873 	intel_dp->link_configuration[1] = intel_dp->lane_count;
874 	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
875 	/*
876 	 * Check for DPCD version > 1.1 and enhanced framing support
877 	 */
878 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
879 	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
880 		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
881 	}
882 }
883 
884 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
885 {
886 	struct drm_device *dev = crtc->dev;
887 	struct drm_i915_private *dev_priv = dev->dev_private;
888 	u32 dpa_ctl;
889 
890 	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
891 	dpa_ctl = I915_READ(DP_A);
892 	dpa_ctl &= ~DP_PLL_FREQ_MASK;
893 
894 	if (clock < 200000) {
895 		/* For a long time we've carried around a ILK-DevA w/a for the
896 		 * 160MHz clock. If we're really unlucky, it's still required.
897 		 */
898 		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
899 		dpa_ctl |= DP_PLL_FREQ_160MHZ;
900 	} else {
901 		dpa_ctl |= DP_PLL_FREQ_270MHZ;
902 	}
903 
904 	I915_WRITE(DP_A, dpa_ctl);
905 
906 	POSTING_READ(DP_A);
907 	udelay(500);
908 }
909 
/*
 * Encoder mode_set hook: assemble the DP port register value (intel_dp->DP)
 * for the chosen link configuration and platform register layout, and
 * program the CPU eDP PLL when needed.  The value is written to the port
 * register later, during enable — only intel_dp->DP is built here.
 */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	/* IVB CPU eDP: CPT-style training bits plus CPU PLL frequency. */
	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		/* IBX PCH / pre-CPT CPU layout. */
		if (!HAS_PCH_SPLIT(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		/* CPT PCH: most bits live in TRANS_DP_CTL instead. */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (is_cpu_edp(intel_dp))
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}
1014 
/*
 * Mask/value pairs for polling PCH_PP_STATUS via
 * ironlake_wait_panel_status(). The literal 0 terms keep the columns of
 * the three pairs visually aligned for comparison.
 */
#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1023 
/*
 * Poll PCH_PP_STATUS until the bits selected by @mask read back as
 * @value, logging an error if that state is not reached within 5000ms.
 */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

	/* Time out rather than hang if the power sequencer never settles. */
	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
	}
}
1042 
/* Block until the panel power sequencer reports the panel fully on. */
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1048 
/* Block until the panel power sequencer reports the panel fully off. */
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1054 
/* Block until any in-progress panel power cycle has completed. */
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1060 
1061 
1062 /* Read the current pp_control value, unlocking the register if it
1063  * is locked
1064  */
1065 
1066 static  u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
1067 {
1068 	u32	control = I915_READ(PCH_PP_CONTROL);
1069 
1070 	control &= ~PANEL_UNLOCK_MASK;
1071 	control |= PANEL_UNLOCK_REGS;
1072 	return control;
1073 }
1074 
/*
 * Force the panel VDD rail on so that AUX transactions work even while
 * the panel itself is still off. No-op for non-eDP outputs. Must be
 * balanced by ironlake_edp_panel_vdd_off().
 */
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	/* Unbalanced vdd_on/vdd_off calls indicate a driver bug. */
	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	/* Let any pending power-off cycle finish before forcing VDD. */
	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}
1113 
/*
 * Synchronously drop the forced VDD, but only if no one still wants it.
 * Caller must hold mode_config.mutex. Sleeps for the panel power down
 * delay after clearing the force bit.
 */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}
1135 
/*
 * Delayed-work callback: drop the forced panel VDD under the
 * mode_config lock. Scheduled by ironlake_edp_panel_vdd_off(sync=false).
 */
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}
1146 
1147 void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1148 {
1149 	if (!is_edp(intel_dp))
1150 		return;
1151 
1152 	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
1153 	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
1154 
1155 	intel_dp->want_panel_vdd = false;
1156 
1157 	if (sync) {
1158 		ironlake_panel_vdd_off_sync(intel_dp);
1159 	} else {
1160 		/*
1161 		 * Queue the timer to fire a long
1162 		 * time from now (relative to the power down delay)
1163 		 * to keep the panel power up across a sequence of operations
1164 		 */
1165 		schedule_delayed_work(&intel_dp->panel_vdd_work,
1166 				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
1167 	}
1168 }
1169 
/*
 * Turn the eDP panel power on via the PCH power sequencer and wait for
 * the on state. No-op for non-eDP outputs and already-powered panels.
 */
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	/* Wait out any still-running power-off cycle first. */
	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}
1211 
/*
 * Turn the eDP panel power off and wait for the sequencer to reach the
 * off state. Clears panel power, forced VDD, panel reset and backlight
 * enable in a single register write.
 */
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	/* VDD was dropped by the write above, so clear the request flag. */
	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}
1236 
/*
 * Enable the eDP backlight, after waiting the configured backlight-on
 * delay so the panel has synced with the link first.
 */
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	intel_panel_enable_backlight(dev, pipe);
}
1263 
/*
 * Disable the eDP backlight, then sleep for the configured
 * backlight-off delay before returning.
 */
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	msleep(intel_dp->backlight_off_delay);
}
1282 
/*
 * Enable the CPU eDP PLL via DP_A. The pipe must already be disabled,
 * and both the PLL and the port must currently be off.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Give the PLL time to stabilize before it is used. */
	udelay(200);
}
1308 
/*
 * Disable the CPU eDP PLL via DP_A. The pipe must already be disabled;
 * the PLL must currently be on and the port off.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
1333 
1334 /* If the sink supports it, try to set the power state appropriately */
1335 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1336 {
1337 	int ret, i;
1338 
1339 	/* Should have a valid DPCD by this point */
1340 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1341 		return;
1342 
1343 	if (mode != DRM_MODE_DPMS_ON) {
1344 		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
1345 						  DP_SET_POWER_D3);
1346 		if (ret != 1)
1347 			DRM_DEBUG_DRIVER("failed to write sink power state\n");
1348 	} else {
1349 		/*
1350 		 * When turning on, we need to retry for 1ms to give the sink
1351 		 * time to wake up.
1352 		 */
1353 		for (i = 0; i < 3; i++) {
1354 			ret = intel_dp_aux_native_write_1(intel_dp,
1355 							  DP_SET_POWER,
1356 							  DP_SET_POWER_D0);
1357 			if (ret == 1)
1358 				break;
1359 			msleep(1);
1360 		}
1361 	}
1362 }
1363 
/*
 * Read out the current hardware state of the DP port. Returns false if
 * the port is disabled; otherwise stores the pipe driving it in @pipe
 * and returns true (also true if the pipe could not be determined).
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum i915_pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* On CPT the pipe<->port routing lives in the transcoder
		 * registers, so scan TRANS_DP_CTL for our port. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}
1412 
/*
 * Disable sequence: backlight off, sink to D3, panel power off — all
 * done while holding VDD so the AUX channel stays usable.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled;
	 * that case is handled in intel_post_disable_dp(). */
	if (!is_cpu_edp(intel_dp))
		intel_dp_link_down(intel_dp);
}
1428 
1429 static void intel_post_disable_dp(struct intel_encoder *encoder)
1430 {
1431 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1432 
1433 	if (is_cpu_edp(intel_dp)) {
1434 		intel_dp_link_down(intel_dp);
1435 		ironlake_edp_pll_off(intel_dp);
1436 	}
1437 }
1438 
/*
 * Enable the DP port: wake the sink, run link training, and (for eDP)
 * power the panel and backlight. The port must currently be disabled.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	/* VDD keeps the AUX channel usable while the panel powers up;
	 * it is released only once the panel has real power. */
	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);
}
1457 
/* Pre-enable hook: CPU eDP needs its PLL running before enabling. */
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp))
		ironlake_edp_pll_on(intel_dp);
}
1465 
1466 /*
1467  * Native read with retry for link status and receiver capability reads for
1468  * cases where the sink may still be asleep.
1469  */
1470 static bool
1471 intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1472 			       uint8_t *recv, int recv_bytes)
1473 {
1474 	int ret, i;
1475 
1476 	/*
1477 	 * Sinks are *supposed* to come up within 1ms from an off state,
1478 	 * but we're also supposed to retry 3 times per the spec.
1479 	 */
1480 	for (i = 0; i < 3; i++) {
1481 		ret = intel_dp_aux_native_read(intel_dp, address, recv,
1482 					       recv_bytes);
1483 		if (ret == recv_bytes)
1484 			return true;
1485 		msleep(1);
1486 	}
1487 
1488 	return false;
1489 }
1490 
1491 /*
1492  * Fetch AUX CH registers 0x202 - 0x207 which contain
1493  * link status information
1494  */
1495 static bool
1496 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1497 {
1498 	return intel_dp_aux_native_read_retry(intel_dp,
1499 					      DP_LANE0_1_STATUS,
1500 					      link_status,
1501 					      DP_LINK_STATUS_SIZE);
1502 }
1503 
#if 0
/* Human-readable name tables for DP training values, kept around for
 * ad-hoc debugging; compiled out. */
static char	*voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char	*pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char	*link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif
1515 
1516 /*
1517  * These are source-specific values; current Intel hardware supports
1518  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1519  */
1520 
1521 static uint8_t
1522 intel_dp_voltage_max(struct intel_dp *intel_dp)
1523 {
1524 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1525 
1526 	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
1527 		return DP_TRAIN_VOLTAGE_SWING_800;
1528 	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1529 		return DP_TRAIN_VOLTAGE_SWING_1200;
1530 	else
1531 		return DP_TRAIN_VOLTAGE_SWING_800;
1532 }
1533 
/*
 * Return the maximum pre-emphasis level the source hardware supports
 * for the given voltage swing. Platform-dependent: Haswell, GEN7
 * (non-VLV) CPU eDP, and everything else each have their own table.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_HASWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_9_5;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	}
}
1575 
/*
 * Compute the next per-lane drive settings from the sink's adjust
 * requests in @link_status: take the highest requested voltage swing
 * and pre-emphasis across all active lanes, clamp each to the source's
 * maximum (setting the corresponding MAX_*_REACHED flag when clamped),
 * and store the combined value in intel_dp->train_set[].
 */
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t voltage_max;
	uint8_t preemph_max;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* All 4 entries are written; only lane_count of them are used. */
	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}
1606 
1607 static uint32_t
1608 intel_gen4_signal_levels(uint8_t train_set)
1609 {
1610 	uint32_t	signal_levels = 0;
1611 
1612 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1613 	case DP_TRAIN_VOLTAGE_SWING_400:
1614 	default:
1615 		signal_levels |= DP_VOLTAGE_0_4;
1616 		break;
1617 	case DP_TRAIN_VOLTAGE_SWING_600:
1618 		signal_levels |= DP_VOLTAGE_0_6;
1619 		break;
1620 	case DP_TRAIN_VOLTAGE_SWING_800:
1621 		signal_levels |= DP_VOLTAGE_0_8;
1622 		break;
1623 	case DP_TRAIN_VOLTAGE_SWING_1200:
1624 		signal_levels |= DP_VOLTAGE_1_2;
1625 		break;
1626 	}
1627 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1628 	case DP_TRAIN_PRE_EMPHASIS_0:
1629 	default:
1630 		signal_levels |= DP_PRE_EMPHASIS_0;
1631 		break;
1632 	case DP_TRAIN_PRE_EMPHASIS_3_5:
1633 		signal_levels |= DP_PRE_EMPHASIS_3_5;
1634 		break;
1635 	case DP_TRAIN_PRE_EMPHASIS_6:
1636 		signal_levels |= DP_PRE_EMPHASIS_6;
1637 		break;
1638 	case DP_TRAIN_PRE_EMPHASIS_9_5:
1639 		signal_levels |= DP_PRE_EMPHASIS_9_5;
1640 		break;
1641 	}
1642 	return signal_levels;
1643 }
1644 
/* Gen6's DP voltage swing and pre-emphasis control.
 * Several swing/pre-emphasis pairs share one hardware level, hence the
 * grouped case labels; unsupported combinations fall back to
 * 400/600mV 0dB with a debug message. */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
1672 
/* Gen7's DP voltage swing and pre-emphasis control.
 * Unsupported combinations fall back to the 500mV 0dB level (note the
 * fallback is 500mV, not one of the trained values above). */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
1703 
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control.
 * Maps a DPCD train_set byte to a DDI buffer emphasis value;
 * unsupported combinations fall back to 400mV 0dB. */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}
1737 
/* Properly updates "DP" with the correct signal levels.
 * Picks the per-platform translation of intel_dp->train_set[0] and
 * replaces the corresponding bit field in *DP, leaving all other bits
 * untouched. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_HASWELL(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}
1765 
/*
 * Program the requested training pattern into the source (Haswell DDI,
 * CPT, or gen4-style port register flavours) and mirror it to the
 * sink's DPCD, including the per-lane drive settings when a training
 * pattern is active. Returns false if writing the lane settings to the
 * sink fails.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int ret;
	uint32_t temp;

	if (IS_HASWELL(dev)) {
		/* Haswell routes training through the DDI DP_TP_CTL register
		 * instead of the port register. */
		temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:

			/* Non-eDP DDI ports must go through the idle pattern
			 * and wait for idle-done before normal output. */
			if (port != PORT_A) {
				temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
				I915_WRITE(DP_TP_CTL(port), temp);

				if (wait_for((I915_READ(DP_TP_STATUS(port)) &
					      DP_TP_STATUS_IDLE_DONE), 1))
					DRM_ERROR("Timed out waiting for DP idle patterns\n");

				temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
			}

			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) &&
		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		/* CPT-style training bits in the port register. */
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Pattern 3 unsupported here: fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Gen4-style training bits in the port register. */
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Pattern 3 unsupported here: fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	/* Tell the sink which pattern the source is now emitting. */
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}
1875 
/* Enable corresponding port and start training pattern 1.
 * Runs the clock-recovery phase of DP link training: writes the link
 * configuration, then loops adjusting voltage swing / pre-emphasis per
 * the sink's requests until clock recovery succeeds, the same voltage
 * has been tried 5 times, or 5 full restarts at max swing have failed.
 * The resulting port register value is saved back into intel_dp->DP. */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes at max swing and still no lock: restart
			 * from zero, up to 5 times. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	intel_dp->DP = DP;
}
1959 
1960 void
1961 intel_dp_complete_link_train(struct intel_dp *intel_dp)
1962 {
1963 	bool channel_eq = false;
1964 	int tries, cr_tries;
1965 	uint32_t DP = intel_dp->DP;
1966 
1967 	/* channel equalization */
1968 	tries = 0;
1969 	cr_tries = 0;
1970 	channel_eq = false;
1971 	for (;;) {
1972 		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
1973 
1974 		if (cr_tries > 5) {
1975 			DRM_ERROR("failed to train DP, aborting\n");
1976 			intel_dp_link_down(intel_dp);
1977 			break;
1978 		}
1979 
1980 		intel_dp_set_signal_levels(intel_dp, &DP);
1981 
1982 		/* channel eq pattern */
1983 		if (!intel_dp_set_link_train(intel_dp, DP,
1984 					     DP_TRAINING_PATTERN_2 |
1985 					     DP_LINK_SCRAMBLING_DISABLE))
1986 			break;
1987 
1988 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
1989 		if (!intel_dp_get_link_status(intel_dp, link_status))
1990 			break;
1991 
1992 		/* Make sure clock is still ok */
1993 		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1994 			intel_dp_start_link_train(intel_dp);
1995 			cr_tries++;
1996 			continue;
1997 		}
1998 
1999 		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
2000 			channel_eq = true;
2001 			break;
2002 		}
2003 
2004 		/* Try 5 times, then try clock recovery if that fails */
2005 		if (tries > 5) {
2006 			intel_dp_link_down(intel_dp);
2007 			intel_dp_start_link_train(intel_dp);
2008 			tries = 0;
2009 			cr_tries++;
2010 			continue;
2011 		}
2012 
2013 		/* Compute new intel_dp->train_set as requested by target */
2014 		intel_get_adjust_train(intel_dp, link_status);
2015 		++tries;
2016 	}
2017 
2018 	if (channel_eq)
2019 		DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
2020 
2021 	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
2022 }
2023 
/*
 * Bring the DP link down: switch the port to the idle training pattern,
 * apply the IBX transcoder-B deselect workaround where needed, and
 * finally clear DP_PORT_EN.  Deliberately a no-op on DDI (Haswell+)
 * hardware -- see the comment below.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	/* The port is expected to still be enabled at this point. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Put the link into the idle training pattern first. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Finally disable audio output and the port itself. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
2102 
/*
 * Read the sink's DPCD receiver-capability block into intel_dp->dpcd
 * and, for DPCD 1.1+ branch devices with a downstream port, the
 * per-port downstream info into intel_dp->downstream_ports.  Returns
 * false if the aux transfer fails or no DPCD is present.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	/* Room for two hex digits per DPCD byte plus the terminator. */
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	/* Dump the first eight DPCD bytes for debugging. */
	ksnprintf(dpcd_hex_dump,
	          sizeof(dpcd_hex_dump),
		  "%02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
		  intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
		  intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
		  intel_dp->dpcd[6], intel_dp->dpcd[7]);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}
2137 
2138 static void
2139 intel_dp_probe_oui(struct intel_dp *intel_dp)
2140 {
2141 	u8 buf[3];
2142 
2143 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2144 		return;
2145 
2146 	ironlake_edp_panel_vdd_on(intel_dp);
2147 
2148 	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2149 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2150 			      buf[0], buf[1], buf[2]);
2151 
2152 	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2153 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2154 			      buf[0], buf[1], buf[2]);
2155 
2156 	ironlake_edp_panel_vdd_off(intel_dp, false);
2157 }
2158 
2159 static bool
2160 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2161 {
2162 	int ret;
2163 
2164 	ret = intel_dp_aux_native_read_retry(intel_dp,
2165 					     DP_DEVICE_SERVICE_IRQ_VECTOR,
2166 					     sink_irq_vector, 1);
2167 	if (!ret)
2168 		return false;
2169 
2170 	return true;
2171 }
2172 
/*
 * Respond to an automated-test request raised via the sink IRQ vector.
 * No tests are implemented, so every request is NAKed.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
2179 
2180 /*
2181  * According to DP spec
2182  * 5.1.2:
2183  *  1. Read DPCD
2184  *  2. Configure link according to Receiver Capabilities
2185  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
2186  *  4. Check link status on receipt of hot-plug interrupt
2187  */
2188 
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Nothing to validate while the output is inactive. */
	if (!intel_encoder->connectors_active)
		return;

	/* An active output without a crtc would be a driver bug. */
	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain the link if channel equalization has been lost. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}
2235 
2236 /* XXX this is probably wrong for multiple downstream ports */
2237 static enum drm_connector_status
2238 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2239 {
2240 	uint8_t *dpcd = intel_dp->dpcd;
2241 	bool hpd;
2242 	uint8_t type;
2243 
2244 	if (!intel_dp_get_dpcd(intel_dp))
2245 		return connector_status_disconnected;
2246 
2247 	/* if there's no downstream port, we're done */
2248 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2249 		return connector_status_connected;
2250 
2251 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
2252 	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2253 	if (hpd) {
2254 		uint8_t reg;
2255 		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2256 						    &reg, 1))
2257 			return connector_status_unknown;
2258 		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2259 					      : connector_status_disconnected;
2260 	}
2261 
2262 	/* If no HPD, poke DDC gently */
2263 	if (drm_probe_ddc(intel_dp->adapter))
2264 		return connector_status_connected;
2265 
2266 	/* Well we tried, say unknown for unreliable port types */
2267 	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2268 	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2269 		return connector_status_unknown;
2270 
2271 	/* Anything else is out of spec, warn and ignore */
2272 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2273 	return connector_status_disconnected;
2274 }
2275 
2276 static enum drm_connector_status
2277 ironlake_dp_detect(struct intel_dp *intel_dp)
2278 {
2279 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2280 	struct drm_i915_private *dev_priv = dev->dev_private;
2281 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2282 	enum drm_connector_status status;
2283 
2284 	/* Can't disconnect eDP, but you can close the lid... */
2285 	if (is_edp(intel_dp)) {
2286 		status = intel_panel_detect(dev);
2287 		if (status == connector_status_unknown)
2288 			status = connector_status_connected;
2289 		return status;
2290 	}
2291 
2292 	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
2293 		return connector_status_disconnected;
2294 
2295 	return intel_dp_detect_dpcd(intel_dp);
2296 }
2297 
2298 static enum drm_connector_status
2299 g4x_dp_detect(struct intel_dp *intel_dp)
2300 {
2301 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2302 	struct drm_i915_private *dev_priv = dev->dev_private;
2303 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2304 	uint32_t bit;
2305 
2306 	switch (intel_dig_port->port) {
2307 	case PORT_B:
2308 		bit = PORTB_HOTPLUG_LIVE_STATUS;
2309 		break;
2310 	case PORT_C:
2311 		bit = PORTC_HOTPLUG_LIVE_STATUS;
2312 		break;
2313 	case PORT_D:
2314 		bit = PORTD_HOTPLUG_LIVE_STATUS;
2315 		break;
2316 	default:
2317 		return connector_status_unknown;
2318 	}
2319 
2320 	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2321 		return connector_status_disconnected;
2322 
2323 	return intel_dp_detect_dpcd(intel_dp);
2324 }
2325 
2326 static struct edid *
2327 intel_dp_get_edid(struct drm_connector *connector, struct device *adapter)
2328 {
2329 	struct intel_connector *intel_connector = to_intel_connector(connector);
2330 
2331 	/* use cached edid if we have one */
2332 	if (intel_connector->edid) {
2333 		struct edid *edid;
2334 		int size;
2335 
2336 		/* invalid edid */
2337 		if (IS_ERR(intel_connector->edid))
2338 			return NULL;
2339 
2340 		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
2341 		edid = kmalloc(size, M_DRM, M_WAITOK);
2342 		if (!edid)
2343 			return NULL;
2344 
2345 		memcpy(edid, intel_connector->edid, size);
2346 		return edid;
2347 	}
2348 
2349 	return drm_get_edid(connector, adapter);
2350 }
2351 
2352 static int
2353 intel_dp_get_edid_modes(struct drm_connector *connector, struct device *adapter)
2354 {
2355 	struct intel_connector *intel_connector = to_intel_connector(connector);
2356 
2357 	/* use cached edid if we have one */
2358 	if (intel_connector->edid) {
2359 		/* invalid edid */
2360 		if (IS_ERR(intel_connector->edid))
2361 			return 0;
2362 
2363 		return intel_connector_update_modes(connector,
2364 						    intel_connector->edid);
2365 	}
2366 
2367 	return intel_ddc_get_modes(connector, adapter);
2368 }
2369 
2370 static enum drm_connector_status
2371 intel_dp_detect(struct drm_connector *connector, bool force)
2372 {
2373 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2374 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2375 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
2376 	struct drm_device *dev = connector->dev;
2377 	enum drm_connector_status status;
2378 	struct edid *edid = NULL;
2379 
2380 	intel_dp->has_audio = false;
2381 
2382 	if (HAS_PCH_SPLIT(dev))
2383 		status = ironlake_dp_detect(intel_dp);
2384 	else
2385 		status = g4x_dp_detect(intel_dp);
2386 
2387 	if (status != connector_status_connected)
2388 		return status;
2389 
2390 	intel_dp_probe_oui(intel_dp);
2391 
2392 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
2393 		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
2394 	} else {
2395 		edid = intel_dp_get_edid(connector, intel_dp->adapter);
2396 		if (edid) {
2397 			intel_dp->has_audio = drm_detect_monitor_audio(edid);
2398 			kfree(edid, M_DRM);
2399 		}
2400 	}
2401 
2402 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
2403 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2404 	return connector_status_connected;
2405 }
2406 
2407 static int intel_dp_get_modes(struct drm_connector *connector)
2408 {
2409 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2410 	struct intel_connector *intel_connector = to_intel_connector(connector);
2411 	struct drm_device *dev = connector->dev;
2412 	int ret;
2413 
2414 	/* We should parse the EDID data and find out if it has an audio sink
2415 	 */
2416 
2417 	ret = intel_dp_get_edid_modes(connector, intel_dp->adapter);
2418 	if (ret)
2419 		return ret;
2420 
2421 	/* if eDP has no EDID, fall back to fixed mode */
2422 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2423 		struct drm_display_mode *mode;
2424 		mode = drm_mode_duplicate(dev,
2425 					  intel_connector->panel.fixed_mode);
2426 		if (mode) {
2427 			drm_mode_probed_add(connector, mode);
2428 			return 1;
2429 		}
2430 	}
2431 	return 0;
2432 }
2433 
2434 static bool
2435 intel_dp_detect_audio(struct drm_connector *connector)
2436 {
2437 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2438 	struct edid *edid;
2439 	bool has_audio = false;
2440 
2441 	edid = intel_dp_get_edid(connector, intel_dp->adapter);
2442 	if (edid) {
2443 		has_audio = drm_detect_monitor_audio(edid);
2444 		kfree(edid, M_DRM);
2445 	}
2446 
2447 	return has_audio;
2448 }
2449 
/*
 * drm_connector_funcs.set_property hook.  Handles the force-audio and
 * broadcast-RGB properties common to all DP connectors, plus the
 * eDP-only scaling-mode property.  Returns 0 on success or -EINVAL for
 * unsupported properties/values; when an applied change requires it,
 * restores the mode on the attached crtc.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO means consult the sink's EDID for audio support. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* No effective change: skip the modeset restore. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}
		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-apply the current mode so the new setting takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
2529 
/*
 * drm_connector_funcs.destroy hook: free the cached EDID (unless it is
 * an ERR_PTR sentinel), tear down eDP panel state and release the
 * connector itself.
 */
static void
intel_dp_destroy(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid, M_DRM);

	/* Only eDP connectors own panel state (fixed mode etc.). */
	if (is_edp(intel_dp))
		intel_panel_fini(&intel_connector->panel);

#if 0
	drm_sysfs_connector_remove(connector);
#endif
	drm_connector_cleanup(connector);
	kfree(connector, M_DRM);
}
2548 
/*
 * drm_encoder_funcs.destroy hook: detach the DragonFly iic adapter/bus
 * children, clean up the encoder, synchronously drop any pending eDP
 * panel VDD (under the mode_config lock) and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	/* Delete the i2c adapter child before the bus that owns it. */
	if (intel_dp->dp_iic_bus != NULL) {
		if (intel_dp->adapter != NULL) {
			device_delete_child(intel_dp->dp_iic_bus,
			    intel_dp->adapter);
		}
		device_delete_child(dev->dev, intel_dp->dp_iic_bus);
	}
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		/* Cancel deferred VDD-off work, then force VDD off now. */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		mutex_lock(&dev->mode_config.mutex);
		ironlake_panel_vdd_off_sync(intel_dp);
		mutex_unlock(&dev->mode_config.mutex);
	}
	kfree(intel_dig_port, M_DRM);
}
2571 
/* Encoder helper vtable for the (non-DDI) DP encoder. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_fixup = intel_dp_mode_fixup,
	.mode_set = intel_dp_mode_set,
	.disable = intel_encoder_noop,
};
2577 
/* Connector ops: DPMS, detection, property handling and teardown. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};
2585 
/* Connector helpers: mode enumeration/validation and encoder lookup. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
2591 
/* Encoder ops table; only destruction is handled here. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
2595 
2596 static void
2597 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
2598 {
2599 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2600 
2601 	intel_dp_check_link_status(intel_dp);
2602 }
2603 
2604 /* Return which DP Port should be selected for Transcoder DP control */
2605 int
2606 intel_trans_dp_port_sel(struct drm_crtc *crtc)
2607 {
2608 	struct drm_device *dev = crtc->dev;
2609 	struct intel_encoder *intel_encoder;
2610 	struct intel_dp *intel_dp;
2611 
2612 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2613 		intel_dp = enc_to_intel_dp(&intel_encoder->base);
2614 
2615 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2616 		    intel_encoder->type == INTEL_OUTPUT_EDP)
2617 			return intel_dp->output_reg;
2618 	}
2619 
2620 	return -1;
2621 }
2622 
2623 /* check the VBT to see whether the eDP is on DP-D port */
2624 bool intel_dpd_is_edp(struct drm_device *dev)
2625 {
2626 	struct drm_i915_private *dev_priv = dev->dev_private;
2627 	struct child_device_config *p_child;
2628 	int i;
2629 
2630 	if (!dev_priv->child_dev_num)
2631 		return false;
2632 
2633 	for (i = 0; i < dev_priv->child_dev_num; i++) {
2634 		p_child = dev_priv->child_dev + i;
2635 
2636 		if (p_child->dvo_port == PORT_IDPD &&
2637 		    p_child->device_type == DEVICE_TYPE_eDP)
2638 			return true;
2639 	}
2640 	return false;
2641 }
2642 
2643 static void
2644 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
2645 {
2646 	struct intel_connector *intel_connector = to_intel_connector(connector);
2647 
2648 	intel_attach_force_audio_property(connector);
2649 	intel_attach_broadcast_rgb_property(connector);
2650 	intel_dp->color_range_auto = true;
2651 
2652 	if (is_edp(intel_dp)) {
2653 		drm_mode_create_scaling_mode_property(connector->dev);
2654 		drm_object_attach_property(
2655 			&connector->base,
2656 			connector->dev->mode_config.scaling_mode_property,
2657 			DRM_MODE_SCALE_ASPECT);
2658 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
2659 	}
2660 }
2661 
/*
 * Compute the eDP panel power-sequencing delays: for each stage take the
 * max of the current register value and the VBT value, falling back to
 * the eDP spec limits when both are zero.  The resulting delays (in ms)
 * are stored in intel_dp; the raw sequence is optionally returned via
 * *out for later programming by
 * intel_dp_init_panel_power_sequencer_registers().
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(dev_priv);
	I915_WRITE(PCH_PP_CONTROL, pp);

	pp_on = I915_READ(PCH_PP_ON_DELAYS);
	pp_off = I915_READ(PCH_PP_OFF_DELAYS);
	pp_div = I915_READ(PCH_PP_DIVISOR);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->edp.pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from the hw's 100usec units to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}
2746 
/*
 * Program a previously computed panel power sequence into the PCH panel
 * power sequencer registers (PP_ON/OFF_DELAYS, PP_DIVISOR), including
 * the port-select bits required on IBX/CPT PCHs.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div;

	/* And finally store the new values in the power sequencer. */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
			<< PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (is_cpu_edp(intel_dp))
			pp_on |= PANEL_POWER_PORT_DP_A;
		else
			pp_on |= PANEL_POWER_PORT_DP_D;
	}

	I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
	I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
	I915_WRITE(PCH_PP_DIVISOR, pp_div);

	/* Read back for the debug log so we see what the hw accepted. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(PCH_PP_ON_DELAYS),
		      I915_READ(PCH_PP_OFF_DELAYS),
		      I915_READ(PCH_PP_DIVISOR));
}
2785 
/*
 * Second-stage connector setup shared by intel_dp_init() and the DDI
 * code: picks the connector type (DP vs eDP), registers the connector,
 * creates the DDC/aux bus and, for eDP panels, initializes the power
 * sequencer and caches the DPCD, EDID and fixed panel mode.  If an eDP
 * sink fails the initial DPCD read it is treated as a ghost and the
 * encoder/connector are destroyed again.
 */
void
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct edp_power_seq power_seq = { 0 };
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int type;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (HAS_PCH_SPLIT(dev) && port == PORT_D)
		if (intel_dpd_is_edp(dev))
			intel_dp->is_pch_edp = true;

	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	if (IS_VALLEYVIEW(dev) && port == PORT_C) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else if (port == PORT_A || is_pch_edp(intel_dp)) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else {
		/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
		 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
		 * rewrite it.
		 */
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
#if 0
	drm_sysfs_connector_add(connector);
#endif

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;


	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		name = "DPDDC-A";
		break;
	case PORT_B:
		dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
		name = "DPDDC-B";
		break;
	case PORT_C:
		dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
		name = "DPDDC-C";
		break;
	case PORT_D:
		dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
		name = "DPDDC-D";
		break;
	default:
		WARN(1, "Invalid port %c\n", port_name(port));
		break;
	}

	if (is_edp(intel_dp))
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	/* Cache DPCD and EDID for edp. */
	if (is_edp(intel_dp)) {
		bool ret;
		struct drm_display_mode *scan;
		struct edid *edid;

		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_encoder->base);
			intel_dp_destroy(connector);
			return;
		}

		/* We now know it's not a ghost, init power sequence regs. */
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
							      &power_seq);

		ironlake_edp_panel_vdd_on(intel_dp);
		edid = drm_get_edid(connector, intel_dp->adapter);
		if (edid) {
			if (drm_add_edid_modes(connector, edid)) {
				drm_mode_connector_update_edid_property(connector, edid);
				drm_edid_to_eld(connector, edid);
			} else {
				/* Invalid EDID: cache an error sentinel. */
				kfree(edid, M_DRM);
				edid = ERR_PTR(-EINVAL);
			}
		} else {
			edid = ERR_PTR(-ENOENT);
		}
		intel_connector->edid = edid;

		/* prefer fixed mode from EDID if available */
		list_for_each_entry(scan, &connector->probed_modes, head) {
			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
				fixed_mode = drm_mode_duplicate(dev, scan);
				break;
			}
		}

		/* fallback to VBT if available for eDP */
		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (fixed_mode)
				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
		}

		ironlake_edp_panel_vdd_off(intel_dp, false);
	}

	if (is_edp(intel_dp)) {
		intel_panel_init(&intel_connector->panel, fixed_mode);
		intel_panel_setup_backlight(connector);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}
2951 
2952 void
2953 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2954 {
2955 	struct intel_digital_port *intel_dig_port;
2956 	struct intel_encoder *intel_encoder;
2957 	struct drm_encoder *encoder;
2958 	struct intel_connector *intel_connector;
2959 
2960 	intel_dig_port = kmalloc(sizeof(struct intel_digital_port), M_DRM,
2961 	    M_WAITOK | M_ZERO);
2962 	if (!intel_dig_port)
2963 		return;
2964 
2965 	intel_connector = kmalloc(sizeof(struct intel_connector), M_DRM,
2966 	    M_WAITOK | M_ZERO);
2967 	if (!intel_connector) {
2968 		kfree(intel_dig_port, M_DRM);
2969 		return;
2970 	}
2971 
2972 	intel_encoder = &intel_dig_port->base;
2973 	encoder = &intel_encoder->base;
2974 
2975 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
2976 			 DRM_MODE_ENCODER_TMDS);
2977 	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
2978 
2979 	intel_encoder->enable = intel_enable_dp;
2980 	intel_encoder->pre_enable = intel_pre_enable_dp;
2981 	intel_encoder->disable = intel_disable_dp;
2982 	intel_encoder->post_disable = intel_post_disable_dp;
2983 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
2984 
2985 	intel_dig_port->port = port;
2986 	intel_dig_port->dp.output_reg = output_reg;
2987 
2988 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2989 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
2990 	intel_encoder->cloneable = false;
2991 	intel_encoder->hot_plug = intel_dp_hot_plug;
2992 
2993 	intel_dp_init_connector(intel_dig_port, intel_connector);
2994 }
2995