1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27 
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <drm/drmP.h>
31 #include <linux/slab.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_crtc.h>
34 #include <drm/drm_crtc_helper.h>
35 #include <drm/drm_edid.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 
40 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
41 
42 static int disable_aux_irq = 0;
43 TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
44 
45 /* Compliance test status bits  */
46 #define INTEL_DP_RESOLUTION_SHIFT_MASK	0
47 #define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 
51 struct dp_link_dpll {
52 	int clock;
53 	struct dpll dpll;
54 };
55 
56 static const struct dp_link_dpll gen4_dpll[] = {
57 	{ 162000,
58 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
59 	{ 270000,
60 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
61 };
62 
63 static const struct dp_link_dpll pch_dpll[] = {
64 	{ 162000,
65 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
66 	{ 270000,
67 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
68 };
69 
70 static const struct dp_link_dpll vlv_dpll[] = {
71 	{ 162000,
72 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
73 	{ 270000,
74 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
75 };
76 
77 /*
78  * CHV supports eDP 1.4, which allows more link rates.
79  * Below we only provide the fixed rates, excluding the variable rates.
80  */
81 static const struct dp_link_dpll chv_dpll[] = {
82 	/*
83 	 * CHV requires programming a fractional divider for m2.
84 	 * m2 is stored in fixed point format using the formula below:
85 	 * (m2_int << 22) | m2_fraction
86 	 */
87 	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
88 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
89 	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
90 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
91 	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
92 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
93 };
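
/*
 * Illustration of the fixed-point encoding above: for the 162000 entry,
 * m2 = (32 << 22) | 1677722 = 0x8000000 | 0x19999a = 0x819999a.
 */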
94 
95 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
96 				  324000, 432000, 540000 };
97 static const int skl_rates[] = { 162000, 216000, 270000,
98 				  324000, 432000, 540000 };
99 static const int default_rates[] = { 162000, 270000, 540000 };
100 
101 /**
102  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
103  * @intel_dp: DP struct
104  *
105  * Returns true if a CPU or PCH DP output is attached to an eDP panel,
106  * and false otherwise.
107  */
108 static bool is_edp(struct intel_dp *intel_dp)
109 {
110 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
111 
112 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
113 }
114 
115 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
116 {
117 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
118 
119 	return intel_dig_port->base.base.dev;
120 }
121 
122 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
123 {
124 	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
125 }
126 
127 static void intel_dp_link_down(struct intel_dp *intel_dp);
128 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
129 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
130 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
131 static void vlv_steal_power_sequencer(struct drm_device *dev,
132 				      enum i915_pipe pipe);
133 
134 static int
135 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
136 {
137 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
138 
139 	switch (max_link_bw) {
140 	case DP_LINK_BW_1_62:
141 	case DP_LINK_BW_2_7:
142 	case DP_LINK_BW_5_4:
143 		break;
144 	default:
145 		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
146 		     max_link_bw);
147 		max_link_bw = DP_LINK_BW_1_62;
148 		break;
149 	}
150 	return max_link_bw;
151 }
152 
153 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
154 {
155 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
156 	struct drm_device *dev = intel_dig_port->base.base.dev;
157 	u8 source_max, sink_max;
158 
159 	source_max = 4;
160 	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
161 	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
162 		source_max = 2;
163 
164 	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
165 
166 	return min(source_max, sink_max);
167 }
168 
169 /*
170  * The units on the numbers in the next two are... bizarre.  Examples will
171  * make it clearer; this one parallels an example in the eDP spec.
172  *
173  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
174  *
175  *     270000 * 1 * 8 / 10 == 216000
176  *
177  * The actual data capacity of that configuration is 2.16Gbit/s, so the
178  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
179  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
180  * 119000.  At 18bpp that's 2142000 kilobits per second.
181  *
182  * Thus the strange-looking division by 10 in intel_dp_link_required, to
183  * get the result in decakilobits instead of kilobits.
184  */
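
/*
 * Worked example (illustrative): a 119000 kHz mode at 18bpp needs
 * intel_dp_link_required(119000, 18) = (119000 * 18 + 9) / 10 = 214200
 * decakilobits, which fits within intel_dp_max_data_rate(270000, 1) =
 * 270000 * 1 * 8 / 10 = 216000 available on a single 2.7GHz lane.
 */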
185 
186 static int
187 intel_dp_link_required(int pixel_clock, int bpp)
188 {
189 	return (pixel_clock * bpp + 9) / 10;
190 }
191 
192 static int
193 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
194 {
195 	return (max_link_clock * max_lanes * 8) / 10;
196 }
197 
198 static enum drm_mode_status
199 intel_dp_mode_valid(struct drm_connector *connector,
200 		    struct drm_display_mode *mode)
201 {
202 	struct intel_dp *intel_dp = intel_attached_dp(connector);
203 	struct intel_connector *intel_connector = to_intel_connector(connector);
204 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
205 	int target_clock = mode->clock;
206 	int max_rate, mode_rate, max_lanes, max_link_clock;
207 
208 	if (is_edp(intel_dp) && fixed_mode) {
209 		if (mode->hdisplay > fixed_mode->hdisplay)
210 			return MODE_PANEL;
211 
212 		if (mode->vdisplay > fixed_mode->vdisplay)
213 			return MODE_PANEL;
214 
215 		target_clock = fixed_mode->clock;
216 	}
217 
218 	max_link_clock = intel_dp_max_link_rate(intel_dp);
219 	max_lanes = intel_dp_max_lane_count(intel_dp);
220 
221 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
222 	mode_rate = intel_dp_link_required(target_clock, 18);
223 
224 	if (mode_rate > max_rate)
225 		return MODE_CLOCK_HIGH;
226 
227 	if (mode->clock < 10000)
228 		return MODE_CLOCK_LOW;
229 
230 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
231 		return MODE_H_ILLEGAL;
232 
233 	return MODE_OK;
234 }
235 
236 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
237 {
238 	int	i;
239 	uint32_t v = 0;
240 
241 	if (src_bytes > 4)
242 		src_bytes = 4;
243 	for (i = 0; i < src_bytes; i++)
244 		v |= ((uint32_t) src[i]) << ((3-i) * 8);
245 	return v;
246 }
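
/*
 * E.g. intel_dp_pack_aux() above packs the two bytes { 0x12, 0x34 } into
 * 0x12340000: bytes are placed MSB-first, as expected by the AUX channel
 * data registers.
 */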
247 
248 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
249 {
250 	int i;
251 	if (dst_bytes > 4)
252 		dst_bytes = 4;
253 	for (i = 0; i < dst_bytes; i++)
254 		dst[i] = src >> ((3-i) * 8);
255 }
256 
257 /* hrawclock is 1/4 the FSB frequency */
258 static int
259 intel_hrawclk(struct drm_device *dev)
260 {
261 	struct drm_i915_private *dev_priv = dev->dev_private;
262 	uint32_t clkcfg;
263 
264 	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
265 	if (IS_VALLEYVIEW(dev))
266 		return 200;
267 
268 	clkcfg = I915_READ(CLKCFG);
269 	switch (clkcfg & CLKCFG_FSB_MASK) {
270 	case CLKCFG_FSB_400:
271 		return 100;
272 	case CLKCFG_FSB_533:
273 		return 133;
274 	case CLKCFG_FSB_667:
275 		return 166;
276 	case CLKCFG_FSB_800:
277 		return 200;
278 	case CLKCFG_FSB_1067:
279 		return 266;
280 	case CLKCFG_FSB_1333:
281 		return 333;
282 	/* these two are just a guess; one of them might be right */
283 	case CLKCFG_FSB_1600:
284 	case CLKCFG_FSB_1600_ALT:
285 		return 400;
286 	default:
287 		return 133;
288 	}
289 }
290 
291 static void
292 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
293 				    struct intel_dp *intel_dp);
294 static void
295 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
296 					      struct intel_dp *intel_dp);
297 
298 static void pps_lock(struct intel_dp *intel_dp)
299 {
300 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
301 	struct intel_encoder *encoder = &intel_dig_port->base;
302 	struct drm_device *dev = encoder->base.dev;
303 	struct drm_i915_private *dev_priv = dev->dev_private;
304 	enum intel_display_power_domain power_domain;
305 
306 	/*
307 	 * See vlv_power_sequencer_reset() for why we need
308 	 * a power domain reference here.
309 	 */
310 	power_domain = intel_display_port_power_domain(encoder);
311 	intel_display_power_get(dev_priv, power_domain);
312 
313 	mutex_lock(&dev_priv->pps_mutex);
314 }
315 
316 static void pps_unlock(struct intel_dp *intel_dp)
317 {
318 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
319 	struct intel_encoder *encoder = &intel_dig_port->base;
320 	struct drm_device *dev = encoder->base.dev;
321 	struct drm_i915_private *dev_priv = dev->dev_private;
322 	enum intel_display_power_domain power_domain;
323 
324 	mutex_unlock(&dev_priv->pps_mutex);
325 
326 	power_domain = intel_display_port_power_domain(encoder);
327 	intel_display_power_put(dev_priv, power_domain);
328 }
329 
330 static void
331 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
332 {
333 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
334 	struct drm_device *dev = intel_dig_port->base.base.dev;
335 	struct drm_i915_private *dev_priv = dev->dev_private;
336 	enum i915_pipe pipe = intel_dp->pps_pipe;
337 	bool pll_enabled;
338 	uint32_t DP;
339 
340 	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
341 		 "skipping pipe %c power sequencer kick due to port %c being active\n",
342 		 pipe_name(pipe), port_name(intel_dig_port->port)))
343 		return;
344 
345 	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
346 		      pipe_name(pipe), port_name(intel_dig_port->port));
347 
348 	/* Preserve the BIOS-computed detected bit. This is
349 	 * supposed to be read-only.
350 	 */
351 	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
352 	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
353 	DP |= DP_PORT_WIDTH(1);
354 	DP |= DP_LINK_TRAIN_PAT_1;
355 
356 	if (IS_CHERRYVIEW(dev))
357 		DP |= DP_PIPE_SELECT_CHV(pipe);
358 	else if (pipe == PIPE_B)
359 		DP |= DP_PIPEB_SELECT;
360 
361 	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
362 
363 	/*
364 	 * The DPLL for the pipe must be enabled for this to work.
365 	 * So enable it temporarily if it's not already enabled.
366 	 */
367 	if (!pll_enabled)
368 		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
369 				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
370 
371 	/*
372 	 * Similar magic as in intel_dp_enable_port().
373 	 * We _must_ do this port enable + disable trick
374 	 * to make this power sequencer lock onto the port.
375 	 * Otherwise even the VDD force bit won't work.
376 	 */
377 	I915_WRITE(intel_dp->output_reg, DP);
378 	POSTING_READ(intel_dp->output_reg);
379 
380 	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
381 	POSTING_READ(intel_dp->output_reg);
382 
383 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
384 	POSTING_READ(intel_dp->output_reg);
385 
386 	if (!pll_enabled)
387 		vlv_force_pll_off(dev, pipe);
388 }
389 
390 static enum i915_pipe
391 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
392 {
393 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
394 	struct drm_device *dev = intel_dig_port->base.base.dev;
395 	struct drm_i915_private *dev_priv = dev->dev_private;
396 	struct intel_encoder *encoder;
397 	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
398 	enum i915_pipe pipe;
399 
400 	lockdep_assert_held(&dev_priv->pps_mutex);
401 
402 	/* We should never land here with regular DP ports */
403 	WARN_ON(!is_edp(intel_dp));
404 
405 	if (intel_dp->pps_pipe != INVALID_PIPE)
406 		return intel_dp->pps_pipe;
407 
408 	/*
409 	 * We don't have a power sequencer currently.
410 	 * Pick one that's not used by other ports.
411 	 */
412 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
413 			    base.head) {
414 		struct intel_dp *tmp;
415 
416 		if (encoder->type != INTEL_OUTPUT_EDP)
417 			continue;
418 
419 		tmp = enc_to_intel_dp(&encoder->base);
420 
421 		if (tmp->pps_pipe != INVALID_PIPE)
422 			pipes &= ~(1 << tmp->pps_pipe);
423 	}
424 
425 	/*
426 	 * Didn't find one. This should not happen since there
427 	 * are two power sequencers and up to two eDP ports.
428 	 */
429 	if (WARN_ON(pipes == 0))
430 		pipe = PIPE_A;
431 	else
432 		pipe = ffs(pipes) - 1;
433 
434 	vlv_steal_power_sequencer(dev, pipe);
435 	intel_dp->pps_pipe = pipe;
436 
437 	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
438 		      pipe_name(intel_dp->pps_pipe),
439 		      port_name(intel_dig_port->port));
440 
441 	/* init power sequencer on this pipe and port */
442 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
443 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
444 
445 	/*
446 	 * Even vdd force doesn't work until we've made
447 	 * the power sequencer lock onto the port.
448 	 */
449 	vlv_power_sequencer_kick(intel_dp);
450 
451 	return intel_dp->pps_pipe;
452 }
453 
454 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
455 			       enum i915_pipe pipe);
456 
457 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
458 			       enum i915_pipe pipe)
459 {
460 	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
461 }
462 
463 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
464 				enum i915_pipe pipe)
465 {
466 	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
467 }
468 
469 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
470 			 enum i915_pipe pipe)
471 {
472 	return true;
473 }
474 
475 static enum i915_pipe
476 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
477 		     enum port port,
478 		     vlv_pipe_check pipe_check)
479 {
480 	enum i915_pipe pipe;
481 
482 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
483 		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
484 			PANEL_PORT_SELECT_MASK;
485 
486 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
487 			continue;
488 
489 		if (!pipe_check(dev_priv, pipe))
490 			continue;
491 
492 		return pipe;
493 	}
494 
495 	return INVALID_PIPE;
496 }
497 
498 static void
499 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
500 {
501 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
502 	struct drm_device *dev = intel_dig_port->base.base.dev;
503 	struct drm_i915_private *dev_priv = dev->dev_private;
504 	enum port port = intel_dig_port->port;
505 
506 	lockdep_assert_held(&dev_priv->pps_mutex);
507 
508 	/* try to find a pipe with this port selected */
509 	/* first pick one where the panel is on */
510 	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
511 						  vlv_pipe_has_pp_on);
512 	/* didn't find one? pick one where vdd is on */
513 	if (intel_dp->pps_pipe == INVALID_PIPE)
514 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
515 							  vlv_pipe_has_vdd_on);
516 	/* didn't find one? pick one with just the correct port */
517 	if (intel_dp->pps_pipe == INVALID_PIPE)
518 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
519 							  vlv_pipe_any);
520 
521 	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
522 	if (intel_dp->pps_pipe == INVALID_PIPE) {
523 		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
524 			      port_name(port));
525 		return;
526 	}
527 
528 	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
529 		      port_name(port), pipe_name(intel_dp->pps_pipe));
530 
531 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
532 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
533 }
534 
535 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
536 {
537 	struct drm_device *dev = dev_priv->dev;
538 	struct intel_encoder *encoder;
539 
540 	if (WARN_ON(!IS_VALLEYVIEW(dev)))
541 		return;
542 
543 	/*
544 	 * We can't grab pps_mutex here due to deadlock with power_domain
545 	 * mutex when power_domain functions are called while holding pps_mutex.
546 	 * That also means that in order to use pps_pipe the code needs to
547 	 * hold both a power domain reference and pps_mutex, and the power domain
548 	 * reference get/put must be done while _not_ holding pps_mutex.
549 	 * pps_{lock,unlock}() do these steps in the correct order, so they
550 	 * should always be used.
551 	 */
552 
553 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
554 		struct intel_dp *intel_dp;
555 
556 		if (encoder->type != INTEL_OUTPUT_EDP)
557 			continue;
558 
559 		intel_dp = enc_to_intel_dp(&encoder->base);
560 		intel_dp->pps_pipe = INVALID_PIPE;
561 	}
562 }
563 
564 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
565 {
566 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
567 
568 	if (IS_BROXTON(dev))
569 		return BXT_PP_CONTROL(0);
570 	else if (HAS_PCH_SPLIT(dev))
571 		return PCH_PP_CONTROL;
572 	else
573 		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
574 }
575 
576 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
577 {
578 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
579 
580 	if (IS_BROXTON(dev))
581 		return BXT_PP_STATUS(0);
582 	else if (HAS_PCH_SPLIT(dev))
583 		return PCH_PP_STATUS;
584 	else
585 		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
586 }
587 
588 /* Reboot notifier handler to shut down panel power, guaranteeing T12 timing.
589    This function is only applicable when the panel PM state is not tracked. */
590 #if 0
591 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
592 			      void *unused)
593 {
594 	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
595 						 edp_notifier);
596 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
597 	struct drm_i915_private *dev_priv = dev->dev_private;
598 	u32 pp_div;
599 	u32 pp_ctrl_reg, pp_div_reg;
600 
601 	if (!is_edp(intel_dp) || code != SYS_RESTART)
602 		return 0;
603 
604 	pps_lock(intel_dp);
605 
606 	if (IS_VALLEYVIEW(dev)) {
607 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
608 
609 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
610 		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
611 		pp_div = I915_READ(pp_div_reg);
612 		pp_div &= PP_REFERENCE_DIVIDER_MASK;
613 
614 		/* 0x1F write to PP_DIV_REG sets max cycle delay */
615 		I915_WRITE(pp_div_reg, pp_div | 0x1F);
616 		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
617 		msleep(intel_dp->panel_power_cycle_delay);
618 	}
619 
620 	pps_unlock(intel_dp);
621 
622 	return 0;
623 }
624 #endif
625 
626 static bool edp_have_panel_power(struct intel_dp *intel_dp)
627 {
628 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
629 	struct drm_i915_private *dev_priv = dev->dev_private;
630 
631 	lockdep_assert_held(&dev_priv->pps_mutex);
632 
633 	if (IS_VALLEYVIEW(dev) &&
634 	    intel_dp->pps_pipe == INVALID_PIPE)
635 		return false;
636 
637 	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
638 }
639 
640 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
641 {
642 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
643 	struct drm_i915_private *dev_priv = dev->dev_private;
644 
645 	lockdep_assert_held(&dev_priv->pps_mutex);
646 
647 	if (IS_VALLEYVIEW(dev) &&
648 	    intel_dp->pps_pipe == INVALID_PIPE)
649 		return false;
650 
651 	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
652 }
653 
654 static void
655 intel_dp_check_edp(struct intel_dp *intel_dp)
656 {
657 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
658 	struct drm_i915_private *dev_priv = dev->dev_private;
659 
660 	if (!is_edp(intel_dp))
661 		return;
662 
663 	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
664 		WARN(1, "eDP powered off while attempting aux channel communication.\n");
665 		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
666 			      I915_READ(_pp_stat_reg(intel_dp)),
667 			      I915_READ(_pp_ctrl_reg(intel_dp)));
668 	}
669 }
670 
671 static uint32_t
672 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
673 {
674 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
675 	struct drm_device *dev = intel_dig_port->base.base.dev;
676 	struct drm_i915_private *dev_priv = dev->dev_private;
677 	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
678 	uint32_t status;
679 	bool done;
680 
681 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
682 	if (has_aux_irq)
683 		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
684 					  msecs_to_jiffies_timeout(10));
685 	else
686 		done = wait_for_atomic(C, 10) == 0;
687 	if (!done)
688 		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
689 			  has_aux_irq);
690 #undef C
691 
692 	return status;
693 }
694 
695 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
696 {
697 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698 	struct drm_device *dev = intel_dig_port->base.base.dev;
699 
700 	/*
701 	 * The clock divider is based on the hrawclk, and we'd like it to run at
702 	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that.
703 	 */
704 	return index ? 0 : intel_hrawclk(dev) / 2;
705 }
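
/* E.g. a 200MHz hrawclk (CLKCFG_FSB_800) yields a divider of 100, i.e.
 * the desired 2MHz AUX clock.
 */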
706 
707 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
708 {
709 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
710 	struct drm_device *dev = intel_dig_port->base.base.dev;
711 	struct drm_i915_private *dev_priv = dev->dev_private;
712 
713 	if (index)
714 		return 0;
715 
716 	if (intel_dig_port->port == PORT_A) {
717 		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
718 
719 	} else {
720 		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
721 	}
722 }
723 
724 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
725 {
726 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
727 	struct drm_device *dev = intel_dig_port->base.base.dev;
728 	struct drm_i915_private *dev_priv = dev->dev_private;
729 
730 	if (intel_dig_port->port == PORT_A) {
731 		if (index)
732 			return 0;
733 		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
734 	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
735 		/* Workaround for non-ULT HSW */
736 		switch (index) {
737 		case 0: return 63;
738 		case 1: return 72;
739 		default: return 0;
740 		}
741 	} else  {
742 		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
743 	}
744 }
745 
746 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
747 {
748 	return index ? 0 : 100;
749 }
750 
751 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
752 {
753 	/*
754 	 * SKL doesn't need us to program the AUX clock divider (Hardware will
755 	 * derive the clock from CDCLK automatically). We still implement the
756 	 * get_aux_clock_divider vfunc to plug into the existing code.
757 	 */
758 	return index ? 0 : 1;
759 }
760 
761 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
762 				      bool has_aux_irq,
763 				      int send_bytes,
764 				      uint32_t aux_clock_divider)
765 {
766 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
767 	struct drm_device *dev = intel_dig_port->base.base.dev;
768 	uint32_t precharge, timeout;
769 
770 	if (IS_GEN6(dev))
771 		precharge = 3;
772 	else
773 		precharge = 5;
774 
775 	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
776 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
777 	else
778 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
779 
780 	return DP_AUX_CH_CTL_SEND_BUSY |
781 	       DP_AUX_CH_CTL_DONE |
782 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
783 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
784 	       timeout |
785 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
786 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
787 	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788 	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
789 }
790 
791 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
792 				      bool has_aux_irq,
793 				      int send_bytes,
794 				      uint32_t unused)
795 {
796 	return DP_AUX_CH_CTL_SEND_BUSY |
797 	       DP_AUX_CH_CTL_DONE |
798 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
799 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
800 	       DP_AUX_CH_CTL_TIME_OUT_1600us |
801 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
802 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
803 	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
804 }
805 
806 static int
807 intel_dp_aux_ch(struct intel_dp *intel_dp,
808 		const uint8_t *send, int send_bytes,
809 		uint8_t *recv, int recv_size)
810 {
811 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
812 	struct drm_device *dev = intel_dig_port->base.base.dev;
813 	struct drm_i915_private *dev_priv = dev->dev_private;
814 	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
815 	uint32_t ch_data = ch_ctl + 4;
816 	uint32_t aux_clock_divider;
817 	int i, ret, recv_bytes;
818 	uint32_t status;
819 	int try, clock = 0;
820 	bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
821 	bool vdd;
822 
823 	pps_lock(intel_dp);
824 
825 	/*
826 	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
827 	 * In such cases we want to leave VDD enabled and it's up to upper layers
828 	 * to turn it off. But for, e.g., i2c-dev access we need to turn it on/off
829 	 * ourselves.
830 	 */
831 	vdd = edp_panel_vdd_on(intel_dp);
832 
833 	/* dp aux is extremely sensitive to irq latency, hence request the
834 	 * lowest possible wakeup latency and so prevent the cpu from going into
835 	 * deep sleep states.
836 	 */
837 	pm_qos_update_request(&dev_priv->pm_qos, 0);
838 
839 	intel_dp_check_edp(intel_dp);
840 
841 	intel_aux_display_runtime_get(dev_priv);
842 
843 	/* Try to wait for any previous AUX channel activity */
844 	for (try = 0; try < 3; try++) {
845 		status = I915_READ_NOTRACE(ch_ctl);
846 		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
847 			break;
848 		msleep(1);
849 	}
850 
851 	if (try == 3) {
852 		static u32 last_status = -1;
853 		const u32 status = I915_READ(ch_ctl);
854 
855 		if (status != last_status) {
856 			WARN(1, "dp_aux_ch not started status 0x%08x\n",
857 			     status);
858 			last_status = status;
859 		}
860 
861 		ret = -EBUSY;
862 		goto out;
863 	}
864 
865 	/* Only 5 data registers! */
866 	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
867 		ret = -E2BIG;
868 		goto out;
869 	}
870 
871 	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
872 		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
873 							  has_aux_irq,
874 							  send_bytes,
875 							  aux_clock_divider);
876 
877 		/* Must try at least 3 times according to DP spec (we allow up to 5) */
878 		for (try = 0; try < 5; try++) {
879 			/* Load the send data into the aux channel data registers */
880 			for (i = 0; i < send_bytes; i += 4)
881 				I915_WRITE(ch_data + i,
882 					   intel_dp_pack_aux(send + i,
883 							     send_bytes - i));
884 
885 			/* Send the command and wait for it to complete */
886 			I915_WRITE(ch_ctl, send_ctl);
887 
888 			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
889 
890 			/* Clear done status and any errors */
891 			I915_WRITE(ch_ctl,
892 				   status |
893 				   DP_AUX_CH_CTL_DONE |
894 				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
895 				   DP_AUX_CH_CTL_RECEIVE_ERROR);
896 
897 			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
898 				continue;
899 
900 			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
901 			 *   400us delay required for errors and timeouts
902 			 *   Timeout errors from the HW already meet this
903 			 *   requirement so skip to next iteration
904 			 */
905 			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
906 				usleep_range(400, 500);
907 				continue;
908 			}
909 			if (status & DP_AUX_CH_CTL_DONE)
910 				goto done;
911 		}
912 	}
913 
914 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
915 		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
916 		ret = -EBUSY;
917 		goto out;
918 	}
919 
920 done:
921 	/* Check for timeout or receive error.
922 	 * Timeouts occur when the sink is not connected.
923 	 */
924 	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
925 		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
926 		ret = -EIO;
927 		goto out;
928 	}
929 
930 	/* Timeouts occur when the device isn't connected, so they're
931 	 * "normal" -- don't fill the kernel log with these */
932 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
933 		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
934 		ret = -ETIMEDOUT;
935 		goto out;
936 	}
937 
938 	/* Unload any bytes sent back from the other side */
939 	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
940 		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
941 	if (recv_bytes > recv_size)
942 		recv_bytes = recv_size;
943 
944 	for (i = 0; i < recv_bytes; i += 4)
945 		intel_dp_unpack_aux(I915_READ(ch_data + i),
946 				    recv + i, recv_bytes - i);
947 
948 	ret = recv_bytes;
949 out:
950 	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
951 	intel_aux_display_runtime_put(dev_priv);
952 
953 	if (vdd)
954 		edp_panel_vdd_off(intel_dp, false);
955 
956 	pps_unlock(intel_dp);
957 
958 	return ret;
959 }
960 
961 #define BARE_ADDRESS_SIZE	3
962 #define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
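/*
 * Illustrative header layout, matching the packing below: byte 0 is the
 * request nibble plus address[19:16], byte 1 is address[15:8], byte 2 is
 * address[7:0], and byte 3 is the transfer size minus one.
 */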
963 static ssize_t
964 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
965 {
966 	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
967 	uint8_t txbuf[20], rxbuf[20];
968 	size_t txsize, rxsize;
969 	int ret;
970 
971 	txbuf[0] = (msg->request << 4) |
972 		((msg->address >> 16) & 0xf);
973 	txbuf[1] = (msg->address >> 8) & 0xff;
974 	txbuf[2] = msg->address & 0xff;
975 	txbuf[3] = msg->size - 1;
976 
977 	switch (msg->request & ~DP_AUX_I2C_MOT) {
978 	case DP_AUX_NATIVE_WRITE:
979 	case DP_AUX_I2C_WRITE:
980 		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
981 		rxsize = 2; /* 0 or 1 data bytes */
982 
983 		if (WARN_ON(txsize > 20))
984 			return -E2BIG;
985 
986 		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
987 
988 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
989 		if (ret > 0) {
990 			msg->reply = rxbuf[0] >> 4;
991 
992 			if (ret > 1) {
993 				/* Number of bytes written in a short write. */
994 				ret = clamp_t(int, rxbuf[1], 0, msg->size);
995 			} else {
996 				/* Return payload size. */
997 				ret = msg->size;
998 			}
999 		}
1000 		break;
1001 
1002 	case DP_AUX_NATIVE_READ:
1003 	case DP_AUX_I2C_READ:
1004 		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1005 		rxsize = msg->size + 1;
1006 
1007 		if (WARN_ON(rxsize > 20))
1008 			return -E2BIG;
1009 
1010 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1011 		if (ret > 0) {
1012 			msg->reply = rxbuf[0] >> 4;
1013 			/*
1014 			 * Assume happy day, and copy the data. The caller is
1015 			 * expected to check msg->reply before touching it.
1016 			 *
1017 			 * Return payload size.
1018 			 */
1019 			ret--;
1020 			memcpy(msg->buffer, rxbuf + 1, ret);
1021 		}
1022 		break;
1023 
1024 	default:
1025 		ret = -EINVAL;
1026 		break;
1027 	}
1028 
1029 	return ret;
1030 }
1031 
1032 static int
1033 intel_dp_i2c_aux_ch(struct device *adapter, int mode,
1034 		    uint8_t write_byte, uint8_t *read_byte)
1035 {
1036 	struct i2c_algo_dp_aux_data *data = device_get_softc(adapter);
1037 	struct intel_dp *intel_dp = data->priv;
1038 	uint16_t address = data->address;
1039 	uint8_t msg[5];
1040 	uint8_t reply[2];
1041 	unsigned retry;
1042 	int msg_bytes;
1043 	int reply_bytes;
1044 	int ret;
1045 
1046 	intel_edp_panel_vdd_on(intel_dp);
1047 	intel_dp_check_edp(intel_dp);
1048 	/* Set up the command byte */
1049 	if (mode & MODE_I2C_READ)
1050 		msg[0] = DP_AUX_I2C_READ << 4;
1051 	else
1052 		msg[0] = DP_AUX_I2C_WRITE << 4;
1053 
1054 	if (!(mode & MODE_I2C_STOP))
1055 		msg[0] |= DP_AUX_I2C_MOT << 4;
1056 
1057 	msg[1] = address >> 8;
1058 	msg[2] = address;
1059 
1060 	switch (mode) {
1061 	case MODE_I2C_WRITE:
1062 		msg[3] = 0;
1063 		msg[4] = write_byte;
1064 		msg_bytes = 5;
1065 		reply_bytes = 1;
1066 		break;
1067 	case MODE_I2C_READ:
1068 		msg[3] = 0;
1069 		msg_bytes = 4;
1070 		reply_bytes = 2;
1071 		break;
1072 	default:
1073 		msg_bytes = 3;
1074 		reply_bytes = 1;
1075 		break;
1076 	}
1077 
1078 	/*
1079 	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
1080 	 * required to retry at least seven times upon receiving AUX_DEFER
1081 	 * before giving up the AUX transaction.
1082 	 */
1083 	for (retry = 0; retry < 7; retry++) {
1084 		ret = intel_dp_aux_ch(intel_dp,
1085 				      msg, msg_bytes,
1086 				      reply, reply_bytes);
1087 		if (ret < 0) {
1088 			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
1089 			goto out;
1090 		}
1091 
1092 		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
1093 		case DP_AUX_NATIVE_REPLY_ACK:
1094 			/* I2C-over-AUX Reply field is only valid
1095 			 * when paired with AUX ACK.
1096 			 */
1097 			break;
1098 		case DP_AUX_NATIVE_REPLY_NACK:
1099 			DRM_DEBUG_KMS("aux_ch native nack\n");
1100 			ret = -EREMOTEIO;
1101 			goto out;
1102 		case DP_AUX_NATIVE_REPLY_DEFER:
1103 			/*
1104 			 * For now, just give more slack to branch devices. We
1105 			 * could check the DPCD for I2C bit rate capabilities,
1106 			 * and if available, adjust the interval. We could also
1107 			 * be more careful with DP-to-Legacy adapters where a
1108 			 * long legacy cable may force very low I2C bit rates.
1109 			 */
1110 			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
1111 			    DP_DWN_STRM_PORT_PRESENT)
1112 				usleep_range(500, 600);
1113 			else
1114 				usleep_range(300, 400);
1115 			continue;
1116 		default:
1117 			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
1118 				  reply[0]);
1119 			ret = -EREMOTEIO;
1120 			goto out;
1121 		}
1122 
1123 		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
1124 		case DP_AUX_I2C_REPLY_ACK:
1125 			if (mode == MODE_I2C_READ) {
1126 				*read_byte = reply[1];
1127 			}
1128 			ret = 0;	/* reply_bytes - 1 */
1129 			goto out;
1130 		case DP_AUX_I2C_REPLY_NACK:
1131 			DRM_DEBUG_KMS("aux_i2c nack\n");
1132 			ret = -EREMOTEIO;
1133 			goto out;
1134 		case DP_AUX_I2C_REPLY_DEFER:
1135 			DRM_DEBUG_KMS("aux_i2c defer\n");
1136 			udelay(100);
1137 			break;
1138 		default:
1139 			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
1140 			ret = -EREMOTEIO;
1141 			goto out;
1142 		}
1143 	}
1144 
1145 	DRM_ERROR("too many retries, giving up\n");
1146 	ret = -EREMOTEIO;
1147 
1148 out:
1149 	return ret;
1150 }
1151 
1152 static void
1153 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1154 {
1155 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1156 	struct drm_i915_private *dev_priv = dev->dev_private;
1157 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1158 	enum port port = intel_dig_port->port;
1159 	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1160 	const char *name = NULL;
1161 	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1162 	int ret;
1163 
1164 	/* On SKL we don't have AUX for port E, so we rely on the VBT to set
1165 	 * a proper alternate AUX channel.
1166 	 */
1167 	if (IS_SKYLAKE(dev) && port == PORT_E) {
1168 		switch (info->alternate_aux_channel) {
1169 		case DP_AUX_B:
1170 			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1171 			break;
1172 		case DP_AUX_C:
1173 			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1174 			break;
1175 		case DP_AUX_D:
1176 			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1177 			break;
1178 		case DP_AUX_A:
1179 		default:
1180 			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1181 		}
1182 	}
1183 
1184 	switch (port) {
1185 	case PORT_A:
1186 		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1187 		name = "DPDDC-A";
1188 		break;
1189 	case PORT_B:
1190 		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1191 		name = "DPDDC-B";
1192 		break;
1193 	case PORT_C:
1194 		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1195 		name = "DPDDC-C";
1196 		break;
1197 	case PORT_D:
1198 		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1199 		name = "DPDDC-D";
1200 		break;
1201 	case PORT_E:
1202 		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1203 		name = "DPDDC-E";
1204 		break;
1205 	default:
1206 		BUG();
1207 	}
1208 
1209 	/*
1210 	 * The AUX_CTL register is usually DP_CTL + 0x10.
1211 	 *
1212 	 * On Haswell and Broadwell though:
1213 	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1214 	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1215 	 *
1216 	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1217 	 */
1218 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1219 		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1220 
1221 	intel_dp->aux.name = name;
1222 	intel_dp->aux.dev = dev->dev;
1223 	intel_dp->aux.transfer = intel_dp_aux_transfer;
1224 
1225 	DRM_DEBUG_KMS("i2c_init %s\n", name);
1226 	ret = iic_dp_aux_add_bus(connector->base.dev->dev, name,
1227 	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
1228 	    &intel_dp->aux.ddc);
1229 	WARN(ret, "intel_dp_i2c_init failed with error %d for port %c\n",
1230 	     ret, port_name(port));
1231 
1232 }
1233 
1234 static void
1235 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1236 {
1237 	intel_connector_unregister(intel_connector);
1238 }
1239 
1240 #if 0
1241 static int
1242 intel_dp_i2c_init(struct intel_dp *intel_dp,
1243 		  struct intel_connector *intel_connector, const char *name)
1244 {
1245 	int	ret;
1246 
1247 	DRM_DEBUG_KMS("i2c_init %s\n", name);
1248 #if 0
1249 	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
1250 	intel_dp->adapter.owner = THIS_MODULE;
1251 	intel_dp->adapter.class = I2C_CLASS_DDC;
1252 	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
1253 	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
1254 	intel_dp->adapter.algo_data = &intel_dp->algo;
1255 	intel_dp->adapter.dev.parent = intel_connector->base.dev->dev;
1256 
1257 	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
1258 	if (ret < 0)
1259 		return ret;
1260 
1261 	ret = sysfs_create_link(&intel_connector->base.kdev->kobj,
1262 				&intel_dp->adapter.dev.kobj,
1263 				intel_dp->adapter.dev.kobj.name);
1264 #endif
1265 	ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
1266 	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
1267 	    &intel_dp->adapter);
1268 
1269 	return ret;
1270 }
1271 #endif
1272 
1273 static void
1274 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1275 {
1276 	u32 ctrl1;
1277 
1278 	memset(&pipe_config->dpll_hw_state, 0,
1279 	       sizeof(pipe_config->dpll_hw_state));
1280 
1281 	pipe_config->ddi_pll_sel = SKL_DPLL0;
1282 	pipe_config->dpll_hw_state.cfgcr1 = 0;
1283 	pipe_config->dpll_hw_state.cfgcr2 = 0;
1284 
1285 	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1286 	switch (pipe_config->port_clock / 2) {
1287 	case 81000:
1288 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1289 					      SKL_DPLL0);
1290 		break;
1291 	case 135000:
1292 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1293 					      SKL_DPLL0);
1294 		break;
1295 	case 270000:
1296 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1297 					      SKL_DPLL0);
1298 		break;
1299 	case 162000:
1300 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1301 					      SKL_DPLL0);
1302 		break;
1303 	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
1304 	 * results in a CDCLK change. Need to handle the change of CDCLK by
1305 	 * disabling pipes and re-enabling them. */
1306 	case 108000:
1307 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1308 					      SKL_DPLL0);
1309 		break;
1310 	case 216000:
1311 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1312 					      SKL_DPLL0);
1313 		break;
1314 
1315 	}
1316 	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1317 }
1318 
1319 void
1320 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1321 {
1322 	memset(&pipe_config->dpll_hw_state, 0,
1323 	       sizeof(pipe_config->dpll_hw_state));
1324 
1325 	switch (pipe_config->port_clock / 2) {
1326 	case 81000:
1327 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1328 		break;
1329 	case 135000:
1330 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1331 		break;
1332 	case 270000:
1333 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1334 		break;
1335 	}
1336 }
1337 
1338 static int
1339 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1340 {
1341 	if (intel_dp->num_sink_rates) {
1342 		*sink_rates = intel_dp->sink_rates;
1343 		return intel_dp->num_sink_rates;
1344 	}
1345 
1346 	*sink_rates = default_rates;
1347 
1348 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1349 }
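
/*
 * Note (illustrative): the shift above works because the link BW codes
 * map onto indices: DP_LINK_BW_1_62 (0x06) >> 3 == 0, DP_LINK_BW_2_7
 * (0x0a) >> 3 == 1, DP_LINK_BW_5_4 (0x14) >> 3 == 2, so adding one
 * yields the number of usable entries in default_rates[].
 */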
1350 
1351 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1352 {
1353 	/* WaDisableHBR2:skl */
1354 	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1355 		return false;
1356 
1357 	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1358 	    (INTEL_INFO(dev)->gen >= 9))
1359 		return true;
1360 	else
1361 		return false;
1362 }
1363 
1364 static int
1365 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1366 {
1367 	int size;
1368 
1369 	if (IS_BROXTON(dev)) {
1370 		*source_rates = bxt_rates;
1371 		size = ARRAY_SIZE(bxt_rates);
1372 	} else if (IS_SKYLAKE(dev)) {
1373 		*source_rates = skl_rates;
1374 		size = ARRAY_SIZE(skl_rates);
1375 	} else {
1376 		*source_rates = default_rates;
1377 		size = ARRAY_SIZE(default_rates);
1378 	}
1379 
1380 	/* This depends on the fact that 5.4 is the last value in the array */
1381 	if (!intel_dp_source_supports_hbr2(dev))
1382 		size--;
1383 
1384 	return size;
1385 }
1386 
1387 static void
1388 intel_dp_set_clock(struct intel_encoder *encoder,
1389 		   struct intel_crtc_state *pipe_config)
1390 {
1391 	struct drm_device *dev = encoder->base.dev;
1392 	const struct dp_link_dpll *divisor = NULL;
1393 	int i, count = 0;
1394 
1395 	if (IS_G4X(dev)) {
1396 		divisor = gen4_dpll;
1397 		count = ARRAY_SIZE(gen4_dpll);
1398 	} else if (HAS_PCH_SPLIT(dev)) {
1399 		divisor = pch_dpll;
1400 		count = ARRAY_SIZE(pch_dpll);
1401 	} else if (IS_CHERRYVIEW(dev)) {
1402 		divisor = chv_dpll;
1403 		count = ARRAY_SIZE(chv_dpll);
1404 	} else if (IS_VALLEYVIEW(dev)) {
1405 		divisor = vlv_dpll;
1406 		count = ARRAY_SIZE(vlv_dpll);
1407 	}
1408 
1409 	if (divisor && count) {
1410 		for (i = 0; i < count; i++) {
1411 			if (pipe_config->port_clock == divisor[i].clock) {
1412 				pipe_config->dpll = divisor[i].dpll;
1413 				pipe_config->clock_set = true;
1414 				break;
1415 			}
1416 		}
1417 	}
1418 }
1419 
1420 static int intersect_rates(const int *source_rates, int source_len,
1421 			   const int *sink_rates, int sink_len,
1422 			   int *common_rates)
1423 {
1424 	int i = 0, j = 0, k = 0;
1425 
1426 	while (i < source_len && j < sink_len) {
1427 		if (source_rates[i] == sink_rates[j]) {
1428 			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1429 				return k;
1430 			common_rates[k] = source_rates[i];
1431 			++k;
1432 			++i;
1433 			++j;
1434 		} else if (source_rates[i] < sink_rates[j]) {
1435 			++i;
1436 		} else {
1437 			++j;
1438 		}
1439 	}
1440 	return k;
1441 }
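
/*
 * E.g. intersecting the default sink rates { 162000, 270000, 540000 }
 * with the SKL source rates { 162000, 216000, 270000, 324000, 432000,
 * 540000 } yields { 162000, 270000, 540000 }. Both inputs must be
 * sorted in ascending order for the merge above to work.
 */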
1442 
1443 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1444 				 int *common_rates)
1445 {
1446 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1447 	const int *source_rates, *sink_rates;
1448 	int source_len, sink_len;
1449 
1450 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1451 	source_len = intel_dp_source_rates(dev, &source_rates);
1452 
1453 	return intersect_rates(source_rates, source_len,
1454 			       sink_rates, sink_len,
1455 			       common_rates);
1456 }
1457 
1458 static void snprintf_int_array(char *str, size_t len,
1459 			       const int *array, int nelem)
1460 {
1461 	int i;
1462 
1463 	str[0] = '\0';
1464 
1465 	for (i = 0; i < nelem; i++) {
1466 		int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1467 		if (r >= len)
1468 			return;
1469 		str += r;
1470 		len -= r;
1471 	}
1472 }
1473 
1474 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1475 {
1476 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1477 	const int *source_rates, *sink_rates;
1478 	int source_len, sink_len, common_len;
1479 	int common_rates[DP_MAX_SUPPORTED_RATES];
1480 	char str[128]; /* FIXME: too big for stack? */
1481 
1482 	if ((drm_debug & DRM_UT_KMS) == 0)
1483 		return;
1484 
1485 	source_len = intel_dp_source_rates(dev, &source_rates);
1486 	snprintf_int_array(str, sizeof(str), source_rates, source_len);
1487 	DRM_DEBUG_KMS("source rates: %s\n", str);
1488 
1489 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1490 	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1491 	DRM_DEBUG_KMS("sink rates: %s\n", str);
1492 
1493 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1494 	snprintf_int_array(str, sizeof(str), common_rates, common_len);
1495 	DRM_DEBUG_KMS("common rates: %s\n", str);
1496 }
1497 
1498 static int rate_to_index(int find, const int *rates)
1499 {
1500 	int i = 0;
1501 
1502 	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1503 		if (find == rates[i])
1504 			break;
1505 
1506 	return i;
1507 }
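
/*
 * Note: on a zero-initialized rates[] array, rate_to_index(0, rates)
 * returns the count of valid leading entries, which is how
 * intel_dp_max_link_rate() below finds the last, i.e. highest, rate.
 */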
1508 
1509 int
1510 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1511 {
1512 	int rates[DP_MAX_SUPPORTED_RATES] = {};
1513 	int len;
1514 
1515 	len = intel_dp_common_rates(intel_dp, rates);
1516 	if (WARN_ON(len <= 0))
1517 		return 162000;
1518 
1519 	return rates[rate_to_index(0, rates) - 1];
1520 }
1521 
1522 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1523 {
1524 	return rate_to_index(rate, intel_dp->sink_rates);
1525 }
1526 
1527 bool
1528 intel_dp_compute_config(struct intel_encoder *encoder,
1529 			struct intel_crtc_state *pipe_config)
1530 {
1531 	struct drm_device *dev = encoder->base.dev;
1532 	struct drm_i915_private *dev_priv = dev->dev_private;
1533 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1534 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1535 	enum port port = dp_to_dig_port(intel_dp)->port;
1536 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1537 	struct intel_connector *intel_connector = intel_dp->attached_connector;
1538 	int lane_count, clock;
1539 	int min_lane_count = 1;
1540 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1541 	/* Conveniently, the link BW constants become indices with a shift...*/
1542 	int min_clock = 0;
1543 	int max_clock;
1544 	int bpp, mode_rate;
1545 	int link_avail, link_clock;
1546 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1547 	int common_len;
1548 
1549 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1550 
1551 	/* No common link rates between source and sink */
1552 	WARN_ON(common_len <= 0);
1553 
1554 	max_clock = common_len - 1;
1555 
1556 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1557 		pipe_config->has_pch_encoder = true;
1558 
1559 	pipe_config->has_dp_encoder = true;
1560 	pipe_config->has_drrs = false;
1561 	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1562 
1563 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1564 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1565 				       adjusted_mode);
1566 
1567 		if (INTEL_INFO(dev)->gen >= 9) {
1568 			int ret;
1569 			ret = skl_update_scaler_crtc(pipe_config);
1570 			if (ret)
1571 				return ret;
1572 		}
1573 
1574 		if (!HAS_PCH_SPLIT(dev))
1575 			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1576 						 intel_connector->panel.fitting_mode);
1577 		else
1578 			intel_pch_panel_fitting(intel_crtc, pipe_config,
1579 						intel_connector->panel.fitting_mode);
1580 	}
1581 
1582 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1583 		return false;
1584 
1585 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1586 		      "max bw %d pixel clock %iKHz\n",
1587 		      max_lane_count, common_rates[max_clock],
1588 		      adjusted_mode->crtc_clock);
1589 
1590 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1591 	 * bpc in between. */
1592 	bpp = pipe_config->pipe_bpp;
1593 	if (is_edp(intel_dp)) {
1594 
1595 		/* Get bpp from vbt only for panels that don't have bpp in edid */
1596 		if (intel_connector->base.display_info.bpc == 0 &&
1597 			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1598 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1599 				      dev_priv->vbt.edp_bpp);
1600 			bpp = dev_priv->vbt.edp_bpp;
1601 		}
1602 
1603 		/*
1604 		 * Use the maximum clock and number of lanes the eDP panel
1605 		 * advertises being capable of. The panels are generally
1606 		 * designed to support only a single clock and lane
1607 		 * configuration, and typically these values correspond to the
1608 		 * native resolution of the panel.
1609 		 */
1610 		min_lane_count = max_lane_count;
1611 		min_clock = max_clock;
1612 	}
1613 
1614 	for (; bpp >= 6*3; bpp -= 2*3) {
1615 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1616 						   bpp);
1617 
1618 		for (clock = min_clock; clock <= max_clock; clock++) {
1619 			for (lane_count = min_lane_count;
1620 				lane_count <= max_lane_count;
1621 				lane_count <<= 1) {
1622 
1623 				link_clock = common_rates[clock];
1624 				link_avail = intel_dp_max_data_rate(link_clock,
1625 								    lane_count);
1626 
1627 				if (mode_rate <= link_avail) {
1628 					goto found;
1629 				}
1630 			}
1631 		}
1632 	}
1633 
1634 	return false;
1635 
1636 found:
1637 	if (intel_dp->color_range_auto) {
1638 		/*
1639 		 * See:
1640 		 * CEA-861-E - 5.1 Default Encoding Parameters
1641 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1642 		 */
1643 		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1644 			intel_dp->color_range = DP_COLOR_RANGE_16_235;
1645 		else
1646 			intel_dp->color_range = 0;
1647 	}
1648 
1649 	if (intel_dp->color_range)
1650 		pipe_config->limited_color_range = true;
1651 
1652 	intel_dp->lane_count = lane_count;
1653 
1654 	if (intel_dp->num_sink_rates) {
1655 		intel_dp->link_bw = 0;
1656 		intel_dp->rate_select =
1657 			intel_dp_rate_select(intel_dp, common_rates[clock]);
1658 	} else {
1659 		intel_dp->link_bw =
1660 			drm_dp_link_rate_to_bw_code(common_rates[clock]);
1661 		intel_dp->rate_select = 0;
1662 	}
1663 
1664 	pipe_config->pipe_bpp = bpp;
1665 	pipe_config->port_clock = common_rates[clock];
1666 
1667 	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1668 		      intel_dp->link_bw, intel_dp->lane_count,
1669 		      pipe_config->port_clock, bpp);
1670 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1671 		      mode_rate, link_avail);
1672 
1673 	intel_link_compute_m_n(bpp, lane_count,
1674 			       adjusted_mode->crtc_clock,
1675 			       pipe_config->port_clock,
1676 			       &pipe_config->dp_m_n);
1677 
1678 	if (intel_connector->panel.downclock_mode != NULL &&
1679 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1680 			pipe_config->has_drrs = true;
1681 			intel_link_compute_m_n(bpp, lane_count,
1682 				intel_connector->panel.downclock_mode->clock,
1683 				pipe_config->port_clock,
1684 				&pipe_config->dp_m2_n2);
1685 	}
1686 
1687 	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1688 		skl_edp_set_pll_config(pipe_config);
1689 	else if (IS_BROXTON(dev))
1690 		/* handled in ddi */;
1691 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1692 		hsw_dp_set_ddi_pll_sel(pipe_config);
1693 	else
1694 		intel_dp_set_clock(encoder, pipe_config);
1695 
1696 	return true;
1697 }
1698 
1699 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1700 {
1701 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1702 	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1703 	struct drm_device *dev = crtc->base.dev;
1704 	struct drm_i915_private *dev_priv = dev->dev_private;
1705 	u32 dpa_ctl;
1706 
1707 	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1708 		      crtc->config->port_clock);
1709 	dpa_ctl = I915_READ(DP_A);
1710 	dpa_ctl &= ~DP_PLL_FREQ_MASK;
1711 
1712 	if (crtc->config->port_clock == 162000) {
1713 		/* For a long time we've carried around an ILK-DevA w/a for the
1714 		 * 160MHz clock. If we're really unlucky, it's still required.
1715 		 */
1716 		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1717 		dpa_ctl |= DP_PLL_FREQ_160MHZ;
1718 		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1719 	} else {
1720 		dpa_ctl |= DP_PLL_FREQ_270MHZ;
1721 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1722 	}
1723 
1724 	I915_WRITE(DP_A, dpa_ctl);
1725 
1726 	POSTING_READ(DP_A);
1727 	udelay(500);
1728 }
1729 
1730 static void intel_dp_prepare(struct intel_encoder *encoder)
1731 {
1732 	struct drm_device *dev = encoder->base.dev;
1733 	struct drm_i915_private *dev_priv = dev->dev_private;
1734 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1735 	enum port port = dp_to_dig_port(intel_dp)->port;
1736 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1737 	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1738 
1739 	/*
1740 	 * There are four kinds of DP registers:
1741 	 *
1742 	 *	IBX PCH
1743 	 *	SNB CPU
1744 	 *	IVB CPU
1745 	 *	CPT PCH
1746 	 *
1747 	 * IBX PCH and CPU are the same for almost everything,
1748 	 * except that the CPU DP PLL is configured in this
1749 	 * register
1750 	 *
1751 	 * CPT PCH is quite different, having many bits moved
1752 	 * to the TRANS_DP_CTL register instead. That
1753 	 * configuration happens (oddly) in ironlake_pch_enable
1754 	 */
1755 
1756 	/* Preserve the BIOS-computed detected bit. This is
1757 	 * supposed to be read-only.
1758 	 */
1759 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1760 
1761 	/* Handle DP bits in common between all three register formats */
1762 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1763 	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1764 
1765 	if (crtc->config->has_audio)
1766 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1767 
1768 	/* Split out the IBX/CPU vs CPT settings */
1769 
1770 	if (IS_GEN7(dev) && port == PORT_A) {
1771 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1772 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1773 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1774 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1775 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1776 
1777 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1778 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1779 
1780 		intel_dp->DP |= crtc->pipe << 29;
1781 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1782 		u32 trans_dp;
1783 
1784 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1785 
1786 		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1787 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1788 			trans_dp |= TRANS_DP_ENH_FRAMING;
1789 		else
1790 			trans_dp &= ~TRANS_DP_ENH_FRAMING;
1791 		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1792 	} else {
1793 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1794 			intel_dp->DP |= intel_dp->color_range;
1795 
1796 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1797 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1798 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1799 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1800 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
1801 
1802 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1803 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1804 
1805 		if (IS_CHERRYVIEW(dev))
1806 			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1807 		else if (crtc->pipe == PIPE_B)
1808 			intel_dp->DP |= DP_PIPEB_SELECT;
1809 	}
1810 }
1811 
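/*
 * PP_STATUS mask/value pairs consumed by wait_panel_status(): each pair
 * selects the status bits to sample and the state they must reach
 * (panel fully on, fully off, or idle after a power cycle).
 */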
1812 #define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1813 #define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1814 
1815 #define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1816 #define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1817 
1818 #define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1819 #define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1820 
1821 static void wait_panel_status(struct intel_dp *intel_dp,
1822 				       u32 mask,
1823 				       u32 value)
1824 {
1825 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1826 	struct drm_i915_private *dev_priv = dev->dev_private;
1827 	u32 pp_stat_reg, pp_ctrl_reg;
1828 
1829 	lockdep_assert_held(&dev_priv->pps_mutex);
1830 
1831 	pp_stat_reg = _pp_stat_reg(intel_dp);
1832 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1833 
1834 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1835 			mask, value,
1836 			I915_READ(pp_stat_reg),
1837 			I915_READ(pp_ctrl_reg));
1838 
1839 	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1840 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1841 				I915_READ(pp_stat_reg),
1842 				I915_READ(pp_ctrl_reg));
1843 	}
1844 
1845 	DRM_DEBUG_KMS("Wait complete\n");
1846 }
1847 
1848 static void wait_panel_on(struct intel_dp *intel_dp)
1849 {
1850 	DRM_DEBUG_KMS("Wait for panel power on\n");
1851 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1852 }
1853 
1854 static void wait_panel_off(struct intel_dp *intel_dp)
1855 {
1856 	DRM_DEBUG_KMS("Wait for panel power off time\n");
1857 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1858 }
1859 
1860 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1861 {
1862 	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1863 
1864 	/* When we disable the VDD override bit last, we have to do the
1865 	 * power-cycle wait manually. */
1866 	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1867 				       intel_dp->panel_power_cycle_delay);
1868 
1869 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1870 }
1871 
1872 static void wait_backlight_on(struct intel_dp *intel_dp)
1873 {
1874 	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1875 				       intel_dp->backlight_on_delay);
1876 }
1877 
1878 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1879 {
1880 	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1881 				       intel_dp->backlight_off_delay);
1882 }
1883 
1884 /* Read the current pp_control value, unlocking the register if it
1885  * is locked.
1886  */
1887 
1888 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1889 {
1890 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1891 	struct drm_i915_private *dev_priv = dev->dev_private;
1892 	u32 control;
1893 
1894 	lockdep_assert_held(&dev_priv->pps_mutex);
1895 
1896 	control = I915_READ(_pp_ctrl_reg(intel_dp));
1897 	if (!IS_BROXTON(dev)) {
1898 		control &= ~PANEL_UNLOCK_MASK;
1899 		control |= PANEL_UNLOCK_REGS;
1900 	}
1901 	return control;
1902 }
1903 
1904 /*
1905  * Must be paired with edp_panel_vdd_off().
1906  * Must hold pps_mutex around the whole on/off sequence.
1907  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1908  */
1909 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1910 {
1911 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1912 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1913 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1914 	struct drm_i915_private *dev_priv = dev->dev_private;
1915 	enum intel_display_power_domain power_domain;
1916 	u32 pp;
1917 	u32 pp_stat_reg, pp_ctrl_reg;
1918 	bool need_to_disable = !intel_dp->want_panel_vdd;
1919 
1920 	lockdep_assert_held(&dev_priv->pps_mutex);
1921 
1922 	if (!is_edp(intel_dp))
1923 		return false;
1924 
1925 	cancel_delayed_work(&intel_dp->panel_vdd_work);
1926 	intel_dp->want_panel_vdd = true;
1927 
1928 	if (edp_have_panel_vdd(intel_dp))
1929 		return need_to_disable;
1930 
1931 	power_domain = intel_display_port_power_domain(intel_encoder);
1932 	intel_display_power_get(dev_priv, power_domain);
1933 
1934 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1935 		      port_name(intel_dig_port->port));
1936 
1937 	if (!edp_have_panel_power(intel_dp))
1938 		wait_panel_power_cycle(intel_dp);
1939 
1940 	pp = ironlake_get_pp_control(intel_dp);
1941 	pp |= EDP_FORCE_VDD;
1942 
1943 	pp_stat_reg = _pp_stat_reg(intel_dp);
1944 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1945 
1946 	I915_WRITE(pp_ctrl_reg, pp);
1947 	POSTING_READ(pp_ctrl_reg);
1948 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1949 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1950 	/*
1951 	 * If the panel wasn't on, delay before accessing aux channel
1952 	 */
1953 	if (!edp_have_panel_power(intel_dp)) {
1954 		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1955 			      port_name(intel_dig_port->port));
1956 		msleep(intel_dp->panel_power_up_delay);
1957 	}
1958 
1959 	return need_to_disable;
1960 }
1961 
1962 /*
1963  * Must be paired with intel_edp_panel_vdd_off() or
1964  * intel_edp_panel_off().
1965  * Nested calls to these functions are not allowed since
1966  * we drop the lock. Caller must use some higher level
1967  * locking to prevent nested calls from other threads.
1968  */
1969 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1970 {
1971 	bool vdd;
1972 
1973 	if (!is_edp(intel_dp))
1974 		return;
1975 
1976 	pps_lock(intel_dp);
1977 	vdd = edp_panel_vdd_on(intel_dp);
1978 	pps_unlock(intel_dp);
1979 
1980 #if 1
1981 /* XXX: limit dmesg spam to 16 warnings instead of 137, where is the bug? */
1982 	if (!vdd)
1983 		DRM_ERROR_RATELIMITED("eDP port %c VDD already requested on\n",
1984 		    port_name(dp_to_dig_port(intel_dp)->port));
1985 #else
1986 	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1987 	     port_name(dp_to_dig_port(intel_dp)->port));
1988 #endif
1989 }
1990 
1991 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1992 {
1993 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1994 	struct drm_i915_private *dev_priv = dev->dev_private;
1995 	struct intel_digital_port *intel_dig_port =
1996 		dp_to_dig_port(intel_dp);
1997 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1998 	enum intel_display_power_domain power_domain;
1999 	u32 pp;
2000 	u32 pp_stat_reg, pp_ctrl_reg;
2001 
2002 	lockdep_assert_held(&dev_priv->pps_mutex);
2003 
2004 	WARN_ON(intel_dp->want_panel_vdd);
2005 
2006 	if (!edp_have_panel_vdd(intel_dp))
2007 		return;
2008 
2009 	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2010 		      port_name(intel_dig_port->port));
2011 
2012 	pp = ironlake_get_pp_control(intel_dp);
2013 	pp &= ~EDP_FORCE_VDD;
2014 
2015 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2016 	pp_stat_reg = _pp_stat_reg(intel_dp);
2017 
2018 	I915_WRITE(pp_ctrl_reg, pp);
2019 	POSTING_READ(pp_ctrl_reg);
2020 
2021 	/* Make sure sequencer is idle before allowing subsequent activity */
2022 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2023 		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2024 
2025 	if ((pp & POWER_TARGET_ON) == 0)
2026 		intel_dp->last_power_cycle = jiffies;
2027 
2028 	power_domain = intel_display_port_power_domain(intel_encoder);
2029 	intel_display_power_put(dev_priv, power_domain);
2030 }
2031 
2032 static void edp_panel_vdd_work(struct work_struct *__work)
2033 {
2034 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
2035 						 struct intel_dp, panel_vdd_work);
2036 
2037 	pps_lock(intel_dp);
2038 	if (!intel_dp->want_panel_vdd)
2039 		edp_panel_vdd_off_sync(intel_dp);
2040 	pps_unlock(intel_dp);
2041 }
2042 
2043 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2044 {
2045 	unsigned long delay;
2046 
2047 	/*
2048 	 * Queue the timer to fire a long time from now (relative to the power
2049 	 * down delay) to keep the panel power up across a sequence of
2050 	 * operations.
2051 	 */
2052 	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2053 	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2054 }
2055 
2056 /*
2057  * Must be paired with edp_panel_vdd_on().
2058  * Must hold pps_mutex around the whole on/off sequence.
2059  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2060  */
2061 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2062 {
2063 	struct drm_i915_private *dev_priv =
2064 		intel_dp_to_dev(intel_dp)->dev_private;
2065 
2066 	lockdep_assert_held(&dev_priv->pps_mutex);
2067 
2068 	if (!is_edp(intel_dp))
2069 		return;
2070 
2071 	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2072 	     port_name(dp_to_dig_port(intel_dp)->port));
2073 
2074 	intel_dp->want_panel_vdd = false;
2075 
2076 	if (sync)
2077 		edp_panel_vdd_off_sync(intel_dp);
2078 	else
2079 		edp_panel_vdd_schedule_off(intel_dp);
2080 }
2081 
2082 static void edp_panel_on(struct intel_dp *intel_dp)
2083 {
2084 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2085 	struct drm_i915_private *dev_priv = dev->dev_private;
2086 	u32 pp;
2087 	u32 pp_ctrl_reg;
2088 
2089 	lockdep_assert_held(&dev_priv->pps_mutex);
2090 
2091 	if (!is_edp(intel_dp))
2092 		return;
2093 
2094 	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2095 		      port_name(dp_to_dig_port(intel_dp)->port));
2096 
2097 	if (WARN(edp_have_panel_power(intel_dp),
2098 		 "eDP port %c panel power already on\n",
2099 		 port_name(dp_to_dig_port(intel_dp)->port)))
2100 		return;
2101 
2102 	wait_panel_power_cycle(intel_dp);
2103 
2104 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2105 	pp = ironlake_get_pp_control(intel_dp);
2106 	if (IS_GEN5(dev)) {
2107 		/* ILK workaround: disable reset around power sequence */
2108 		pp &= ~PANEL_POWER_RESET;
2109 		I915_WRITE(pp_ctrl_reg, pp);
2110 		POSTING_READ(pp_ctrl_reg);
2111 	}
2112 
2113 	pp |= POWER_TARGET_ON;
2114 	if (!IS_GEN5(dev))
2115 		pp |= PANEL_POWER_RESET;
2116 
2117 	I915_WRITE(pp_ctrl_reg, pp);
2118 	POSTING_READ(pp_ctrl_reg);
2119 
2120 	wait_panel_on(intel_dp);
2121 	intel_dp->last_power_on = jiffies;
2122 
2123 	if (IS_GEN5(dev)) {
2124 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2125 		I915_WRITE(pp_ctrl_reg, pp);
2126 		POSTING_READ(pp_ctrl_reg);
2127 	}
2128 }
2129 
2130 void intel_edp_panel_on(struct intel_dp *intel_dp)
2131 {
2132 	if (!is_edp(intel_dp))
2133 		return;
2134 
2135 	pps_lock(intel_dp);
2136 	edp_panel_on(intel_dp);
2137 	pps_unlock(intel_dp);
2138 }
2139 
2140 
2141 static void edp_panel_off(struct intel_dp *intel_dp)
2142 {
2143 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2144 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
2145 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2146 	struct drm_i915_private *dev_priv = dev->dev_private;
2147 	enum intel_display_power_domain power_domain;
2148 	u32 pp;
2149 	u32 pp_ctrl_reg;
2150 
2151 	lockdep_assert_held(&dev_priv->pps_mutex);
2152 
2153 	if (!is_edp(intel_dp))
2154 		return;
2155 
2156 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2157 		      port_name(dp_to_dig_port(intel_dp)->port));
2158 
2159 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2160 	     port_name(dp_to_dig_port(intel_dp)->port));
2161 
2162 	pp = ironlake_get_pp_control(intel_dp);
2163 	/* We need to switch off panel power _and_ force vdd; otherwise some
2164 	 * panels get very unhappy and cease to work. */
2165 	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2166 		EDP_BLC_ENABLE);
2167 
2168 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2169 
2170 	intel_dp->want_panel_vdd = false;
2171 
2172 	I915_WRITE(pp_ctrl_reg, pp);
2173 	POSTING_READ(pp_ctrl_reg);
2174 
2175 	intel_dp->last_power_cycle = jiffies;
2176 	wait_panel_off(intel_dp);
2177 
2178 	/* We got a reference when we enabled the VDD. */
2179 	power_domain = intel_display_port_power_domain(intel_encoder);
2180 	intel_display_power_put(dev_priv, power_domain);
2181 }
2182 
2183 void intel_edp_panel_off(struct intel_dp *intel_dp)
2184 {
2185 	if (!is_edp(intel_dp))
2186 		return;
2187 
2188 	pps_lock(intel_dp);
2189 	edp_panel_off(intel_dp);
2190 	pps_unlock(intel_dp);
2191 }
2192 
2193 /* Enable backlight in the panel power control. */
2194 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2195 {
2196 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2197 	struct drm_device *dev = intel_dig_port->base.base.dev;
2198 	struct drm_i915_private *dev_priv = dev->dev_private;
2199 	u32 pp;
2200 	u32 pp_ctrl_reg;
2201 
2202 	/*
2203 	 * If we enable the backlight right away following a panel power
2204 	 * on, we may see slight flicker as the panel syncs with the eDP
2205 	 * link.  So delay a bit to make sure the image is solid before
2206 	 * allowing it to appear.
2207 	 */
2208 	wait_backlight_on(intel_dp);
2209 
2210 	pps_lock(intel_dp);
2211 
2212 	pp = ironlake_get_pp_control(intel_dp);
2213 	pp |= EDP_BLC_ENABLE;
2214 
2215 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2216 
2217 	I915_WRITE(pp_ctrl_reg, pp);
2218 	POSTING_READ(pp_ctrl_reg);
2219 
2220 	pps_unlock(intel_dp);
2221 }
2222 
2223 /* Enable backlight PWM and backlight PP control. */
2224 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2225 {
2226 	if (!is_edp(intel_dp))
2227 		return;
2228 
2229 	DRM_DEBUG_KMS("\n");
2230 
2231 	intel_panel_enable_backlight(intel_dp->attached_connector);
2232 	_intel_edp_backlight_on(intel_dp);
2233 }
2234 
2235 /* Disable backlight in the panel power control. */
2236 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2237 {
2238 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2239 	struct drm_i915_private *dev_priv = dev->dev_private;
2240 	u32 pp;
2241 	u32 pp_ctrl_reg;
2242 
2243 	if (!is_edp(intel_dp))
2244 		return;
2245 
2246 	pps_lock(intel_dp);
2247 
2248 	pp = ironlake_get_pp_control(intel_dp);
2249 	pp &= ~EDP_BLC_ENABLE;
2250 
2251 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2252 
2253 	I915_WRITE(pp_ctrl_reg, pp);
2254 	POSTING_READ(pp_ctrl_reg);
2255 
2256 	pps_unlock(intel_dp);
2257 
2258 	intel_dp->last_backlight_off = jiffies;
2259 	edp_wait_backlight_off(intel_dp);
2260 }
2261 
2262 /* Disable backlight PP control and backlight PWM. */
2263 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2264 {
2265 	if (!is_edp(intel_dp))
2266 		return;
2267 
2268 	DRM_DEBUG_KMS("\n");
2269 
2270 	_intel_edp_backlight_off(intel_dp);
2271 	intel_panel_disable_backlight(intel_dp->attached_connector);
2272 }
2273 
2274 /*
2275  * Hook for controlling the panel power control backlight through the bl_power
2276  * sysfs attribute. Take care to handle multiple calls.
2277  */
2278 static void intel_edp_backlight_power(struct intel_connector *connector,
2279 				      bool enable)
2280 {
2281 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2282 	bool is_enabled;
2283 
2284 	pps_lock(intel_dp);
2285 	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2286 	pps_unlock(intel_dp);
2287 
2288 	if (is_enabled == enable)
2289 		return;
2290 
2291 	DRM_DEBUG_KMS("panel power control backlight %s\n",
2292 		      enable ? "enable" : "disable");
2293 
2294 	if (enable)
2295 		_intel_edp_backlight_on(intel_dp);
2296 	else
2297 		_intel_edp_backlight_off(intel_dp);
2298 }
2299 
2300 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2301 {
2302 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2303 	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2304 	struct drm_device *dev = crtc->dev;
2305 	struct drm_i915_private *dev_priv = dev->dev_private;
2306 	u32 dpa_ctl;
2307 
2308 	assert_pipe_disabled(dev_priv,
2309 			     to_intel_crtc(crtc)->pipe);
2310 
2311 	DRM_DEBUG_KMS("\n");
2312 	dpa_ctl = I915_READ(DP_A);
2313 	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2314 	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2315 
2316 	/* We don't adjust intel_dp->DP while tearing down the link, to
2317 	 * facilitate link retraining (e.g. after hotplug). Hence clear all
2318 	 * enable bits here to ensure that we don't enable too much. */
2319 	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2320 	intel_dp->DP |= DP_PLL_ENABLE;
2321 	I915_WRITE(DP_A, intel_dp->DP);
2322 	POSTING_READ(DP_A);
2323 	udelay(200);
2324 }
2325 
2326 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2327 {
2328 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2329 	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2330 	struct drm_device *dev = crtc->dev;
2331 	struct drm_i915_private *dev_priv = dev->dev_private;
2332 	u32 dpa_ctl;
2333 
2334 	assert_pipe_disabled(dev_priv,
2335 			     to_intel_crtc(crtc)->pipe);
2336 
2337 	dpa_ctl = I915_READ(DP_A);
2338 	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2339 	     "dp pll off, should be on\n");
2340 	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2341 
2342 	/* We can't rely on the value tracked for the DP register in
2343 	 * intel_dp->DP because link_down must not change that (otherwise link
2344 	 * re-training will fail). */
2345 	dpa_ctl &= ~DP_PLL_ENABLE;
2346 	I915_WRITE(DP_A, dpa_ctl);
2347 	POSTING_READ(DP_A);
2348 	udelay(200);
2349 }
2350 
2351 /* If the sink supports it, try to set the power state appropriately */
2352 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2353 {
2354 	int ret, i;
2355 
2356 	/* Should have a valid DPCD by this point */
2357 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2358 		return;
2359 
2360 	if (mode != DRM_MODE_DPMS_ON) {
2361 		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2362 					 DP_SET_POWER_D3);
2363 	} else {
2364 		/*
2365 		 * When turning on, retry a few times, sleeping 1 ms between
2366 		 * attempts, to give the sink time to wake up.
2367 		 */
2368 		for (i = 0; i < 3; i++) {
2369 			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2370 						 DP_SET_POWER_D0);
2371 			if (ret == 1)
2372 				break;
2373 			msleep(1);
2374 		}
2375 	}
2376 
2377 	if (ret != 1)
2378 		DRM_DEBUG_KMS("failed to %s sink power state\n",
2379 			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2380 }
2381 
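/*
 * Read back whether the port is enabled and, if so, which pipe feeds it;
 * the pipe is encoded differently depending on the platform generation.
 */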
2382 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2383 				  enum i915_pipe *pipe)
2384 {
2385 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2386 	enum port port = dp_to_dig_port(intel_dp)->port;
2387 	struct drm_device *dev = encoder->base.dev;
2388 	struct drm_i915_private *dev_priv = dev->dev_private;
2389 	enum intel_display_power_domain power_domain;
2390 	u32 tmp;
2391 
2392 	power_domain = intel_display_port_power_domain(encoder);
2393 	if (!intel_display_power_is_enabled(dev_priv, power_domain))
2394 		return false;
2395 
2396 	tmp = I915_READ(intel_dp->output_reg);
2397 
2398 	if (!(tmp & DP_PORT_EN))
2399 		return false;
2400 
2401 	if (IS_GEN7(dev) && port == PORT_A) {
2402 		*pipe = PORT_TO_PIPE_CPT(tmp);
2403 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2404 		enum i915_pipe p;
2405 
2406 		for_each_pipe(dev_priv, p) {
2407 			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2408 			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2409 				*pipe = p;
2410 				return true;
2411 			}
2412 		}
2413 
2414 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2415 			      intel_dp->output_reg);
2416 	} else if (IS_CHERRYVIEW(dev)) {
2417 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2418 	} else {
2419 		*pipe = PORT_TO_PIPE(tmp);
2420 	}
2421 
2422 	return true;
2423 }
2424 
2425 static void intel_dp_get_config(struct intel_encoder *encoder,
2426 				struct intel_crtc_state *pipe_config)
2427 {
2428 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2429 	u32 tmp, flags = 0;
2430 	struct drm_device *dev = encoder->base.dev;
2431 	struct drm_i915_private *dev_priv = dev->dev_private;
2432 	enum port port = dp_to_dig_port(intel_dp)->port;
2433 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2434 	int dotclock;
2435 
2436 	tmp = I915_READ(intel_dp->output_reg);
2437 
2438 	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2439 
2440 	if (HAS_PCH_CPT(dev) && port != PORT_A) {
2441 		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2442 		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2443 			flags |= DRM_MODE_FLAG_PHSYNC;
2444 		else
2445 			flags |= DRM_MODE_FLAG_NHSYNC;
2446 
2447 		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2448 			flags |= DRM_MODE_FLAG_PVSYNC;
2449 		else
2450 			flags |= DRM_MODE_FLAG_NVSYNC;
2451 	} else {
2452 		if (tmp & DP_SYNC_HS_HIGH)
2453 			flags |= DRM_MODE_FLAG_PHSYNC;
2454 		else
2455 			flags |= DRM_MODE_FLAG_NHSYNC;
2456 
2457 		if (tmp & DP_SYNC_VS_HIGH)
2458 			flags |= DRM_MODE_FLAG_PVSYNC;
2459 		else
2460 			flags |= DRM_MODE_FLAG_NVSYNC;
2461 	}
2462 
2463 	pipe_config->base.adjusted_mode.flags |= flags;
2464 
2465 	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2466 	    tmp & DP_COLOR_RANGE_16_235)
2467 		pipe_config->limited_color_range = true;
2468 
2469 	pipe_config->has_dp_encoder = true;
2470 
2471 	intel_dp_get_m_n(crtc, pipe_config);
2472 
2473 	if (port == PORT_A) {
2474 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2475 			pipe_config->port_clock = 162000;
2476 		else
2477 			pipe_config->port_clock = 270000;
2478 	}
2479 
2480 	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2481 					    &pipe_config->dp_m_n);
2482 
2483 	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2484 		ironlake_check_encoder_dotclock(pipe_config, dotclock);
2485 
2486 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2487 
2488 	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2489 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2490 		/*
2491 		 * This is a big fat ugly hack.
2492 		 *
2493 		 * Some machines in UEFI boot mode provide us a VBT that has 18
2494 		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2495 		 * unknown we fail to light up. Yet the same BIOS boots up with
2496 		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2497 		 * max, not what it tells us to use.
2498 		 *
2499 		 * Note: This will still be broken if the eDP panel is not lit
2500 		 * up by the BIOS, and thus we can't get the mode at module
2501 		 * load.
2502 		 */
2503 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2504 			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2505 		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2506 	}
2507 }
2508 
2509 static void intel_disable_dp(struct intel_encoder *encoder)
2510 {
2511 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2512 	struct drm_device *dev = encoder->base.dev;
2513 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2514 
2515 	if (crtc->config->has_audio)
2516 		intel_audio_codec_disable(encoder);
2517 
2518 	if (HAS_PSR(dev) && !HAS_DDI(dev))
2519 		intel_psr_disable(intel_dp);
2520 
2521 	/* Make sure the panel is off before trying to change the mode. But also
2522 	 * ensure that we have vdd while we switch off the panel. */
2523 	intel_edp_panel_vdd_on(intel_dp);
2524 	intel_edp_backlight_off(intel_dp);
2525 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2526 	intel_edp_panel_off(intel_dp);
2527 
2528 	/* disable the port before the pipe on g4x */
2529 	if (INTEL_INFO(dev)->gen < 5)
2530 		intel_dp_link_down(intel_dp);
2531 }
2532 
2533 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2534 {
2535 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2536 	enum port port = dp_to_dig_port(intel_dp)->port;
2537 
2538 	intel_dp_link_down(intel_dp);
2539 	if (port == PORT_A)
2540 		ironlake_edp_pll_off(intel_dp);
2541 }
2542 
2543 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2544 {
2545 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2546 
2547 	intel_dp_link_down(intel_dp);
2548 }
2549 
2550 static void chv_post_disable_dp(struct intel_encoder *encoder)
2551 {
2552 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2553 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2554 	struct drm_device *dev = encoder->base.dev;
2555 	struct drm_i915_private *dev_priv = dev->dev_private;
2556 	struct intel_crtc *intel_crtc =
2557 		to_intel_crtc(encoder->base.crtc);
2558 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2559 	enum i915_pipe pipe = intel_crtc->pipe;
2560 	u32 val;
2561 
2562 	intel_dp_link_down(intel_dp);
2563 
2564 	mutex_lock(&dev_priv->sb_lock);
2565 
2566 	/* Propagate soft reset to data lane reset */
2567 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2568 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2569 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2570 
2571 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2572 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2573 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2574 
2575 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2576 	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2577 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2578 
2579 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2580 	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2581 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2582 
2583 	mutex_unlock(&dev_priv->sb_lock);
2584 }
2585 
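/*
 * Program the requested training pattern. On DDI platforms this writes
 * DP_TP_CTL directly; on older platforms it only updates *DP and the
 * caller is responsible for writing the port register.
 */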
2586 static void
2587 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2588 			 uint32_t *DP,
2589 			 uint8_t dp_train_pat)
2590 {
2591 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2592 	struct drm_device *dev = intel_dig_port->base.base.dev;
2593 	struct drm_i915_private *dev_priv = dev->dev_private;
2594 	enum port port = intel_dig_port->port;
2595 
2596 	if (HAS_DDI(dev)) {
2597 		uint32_t temp = I915_READ(DP_TP_CTL(port));
2598 
2599 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2600 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2601 		else
2602 			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2603 
2604 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2605 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2606 		case DP_TRAINING_PATTERN_DISABLE:
2607 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2608 
2609 			break;
2610 		case DP_TRAINING_PATTERN_1:
2611 			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2612 			break;
2613 		case DP_TRAINING_PATTERN_2:
2614 			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2615 			break;
2616 		case DP_TRAINING_PATTERN_3:
2617 			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2618 			break;
2619 		}
2620 		I915_WRITE(DP_TP_CTL(port), temp);
2621 
2622 	} else if ((IS_GEN7(dev) && port == PORT_A) ||
2623 		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
2624 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2625 
2626 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2627 		case DP_TRAINING_PATTERN_DISABLE:
2628 			*DP |= DP_LINK_TRAIN_OFF_CPT;
2629 			break;
2630 		case DP_TRAINING_PATTERN_1:
2631 			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2632 			break;
2633 		case DP_TRAINING_PATTERN_2:
2634 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2635 			break;
2636 		case DP_TRAINING_PATTERN_3:
2637 			DRM_ERROR("DP training pattern 3 not supported\n");
2638 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2639 			break;
2640 		}
2641 
2642 	} else {
2643 		if (IS_CHERRYVIEW(dev))
2644 			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
2645 		else
2646 			*DP &= ~DP_LINK_TRAIN_MASK;
2647 
2648 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2649 		case DP_TRAINING_PATTERN_DISABLE:
2650 			*DP |= DP_LINK_TRAIN_OFF;
2651 			break;
2652 		case DP_TRAINING_PATTERN_1:
2653 			*DP |= DP_LINK_TRAIN_PAT_1;
2654 			break;
2655 		case DP_TRAINING_PATTERN_2:
2656 			*DP |= DP_LINK_TRAIN_PAT_2;
2657 			break;
2658 		case DP_TRAINING_PATTERN_3:
2659 			if (IS_CHERRYVIEW(dev)) {
2660 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
2661 			} else {
2662 				DRM_ERROR("DP training pattern 3 not supported\n");
2663 				*DP |= DP_LINK_TRAIN_PAT_2;
2664 			}
2665 			break;
2666 		}
2667 	}
2668 }
2669 
2670 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2671 {
2672 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2673 	struct drm_i915_private *dev_priv = dev->dev_private;
2674 
2675 	/* enable with pattern 1 (as per spec) */
2676 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2677 				 DP_TRAINING_PATTERN_1);
2678 
2679 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2680 	POSTING_READ(intel_dp->output_reg);
2681 
2682 	/*
2683 	 * Magic for VLV/CHV. We _must_ first set up the register
2684 	 * without actually enabling the port, and then do another
2685 	 * write to enable the port. Otherwise link training will
2686 	 * fail when the power sequencer is freshly used for this port.
2687 	 */
2688 	intel_dp->DP |= DP_PORT_EN;
2689 
2690 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2691 	POSTING_READ(intel_dp->output_reg);
2692 }
2693 
2694 static void intel_enable_dp(struct intel_encoder *encoder)
2695 {
2696 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2697 	struct drm_device *dev = encoder->base.dev;
2698 	struct drm_i915_private *dev_priv = dev->dev_private;
2699 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2700 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2701 	unsigned int lane_mask = 0x0;
2702 
2703 	if (WARN_ON(dp_reg & DP_PORT_EN))
2704 		return;
2705 
2706 	pps_lock(intel_dp);
2707 
2708 	if (IS_VALLEYVIEW(dev))
2709 		vlv_init_panel_power_sequencer(intel_dp);
2710 
2711 	intel_dp_enable_port(intel_dp);
2712 
2713 	edp_panel_vdd_on(intel_dp);
2714 	edp_panel_on(intel_dp);
2715 	edp_panel_vdd_off(intel_dp, true);
2716 
2717 	pps_unlock(intel_dp);
2718 
2719 	if (IS_VALLEYVIEW(dev))
2720 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2721 				    lane_mask);
2722 
2723 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2724 	intel_dp_start_link_train(intel_dp);
2725 	intel_dp_complete_link_train(intel_dp);
2726 	intel_dp_stop_link_train(intel_dp);
2727 
2728 	if (crtc->config->has_audio) {
2729 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2730 				 pipe_name(crtc->pipe));
2731 		intel_audio_codec_enable(encoder);
2732 	}
2733 }
2734 
2735 static void g4x_enable_dp(struct intel_encoder *encoder)
2736 {
2737 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2738 
2739 	intel_enable_dp(encoder);
2740 	intel_edp_backlight_on(intel_dp);
2741 }
2742 
2743 static void vlv_enable_dp(struct intel_encoder *encoder)
2744 {
2745 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2746 
2747 	intel_edp_backlight_on(intel_dp);
2748 	intel_psr_enable(intel_dp);
2749 }
2750 
2751 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2752 {
2753 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2754 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2755 
2756 	intel_dp_prepare(encoder);
2757 
2758 	/* Only ilk+ has port A */
2759 	if (dport->port == PORT_A) {
2760 		ironlake_set_pll_cpu_edp(intel_dp);
2761 		ironlake_edp_pll_on(intel_dp);
2762 	}
2763 }
2764 
2765 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2766 {
2767 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2768 	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2769 	enum i915_pipe pipe = intel_dp->pps_pipe;
2770 	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2771 
2772 	edp_panel_vdd_off_sync(intel_dp);
2773 
2774 	/*
2775 	 * VLV seems to get confused when multiple power sequencers
2776 	 * have the same port selected (even if only one has power/vdd
2777 	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2778 	 * CHV, on the other hand, doesn't seem to mind having the same port
2779 	 * selected in multiple power sequencers, but let's always clear the
2780 	 * port select when logically disconnecting a power sequencer
2781 	 * from a port.
2782 	 */
2783 	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2784 		      pipe_name(pipe), port_name(intel_dig_port->port));
2785 	I915_WRITE(pp_on_reg, 0);
2786 	POSTING_READ(pp_on_reg);
2787 
2788 	intel_dp->pps_pipe = INVALID_PIPE;
2789 }
2790 
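/*
 * Walk all eDP encoders and detach any that currently own the power
 * sequencer for the given pipe, making sure their vdd is off first.
 */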
2791 static void vlv_steal_power_sequencer(struct drm_device *dev,
2792 				      enum i915_pipe pipe)
2793 {
2794 	struct drm_i915_private *dev_priv = dev->dev_private;
2795 	struct intel_encoder *encoder;
2796 
2797 	lockdep_assert_held(&dev_priv->pps_mutex);
2798 
2799 	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2800 		return;
2801 
2802 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2803 			    base.head) {
2804 		struct intel_dp *intel_dp;
2805 		enum port port;
2806 
2807 		if (encoder->type != INTEL_OUTPUT_EDP)
2808 			continue;
2809 
2810 		intel_dp = enc_to_intel_dp(&encoder->base);
2811 		port = dp_to_dig_port(intel_dp)->port;
2812 
2813 		if (intel_dp->pps_pipe != pipe)
2814 			continue;
2815 
2816 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2817 			      pipe_name(pipe), port_name(port));
2818 
2819 		WARN(encoder->base.crtc,
2820 		     "stealing pipe %c power sequencer from active eDP port %c\n",
2821 		     pipe_name(pipe), port_name(port));
2822 
2823 		/* make sure vdd is off before we steal it */
2824 		vlv_detach_power_sequencer(intel_dp);
2825 	}
2826 }
2827 
2828 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2829 {
2830 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2831 	struct intel_encoder *encoder = &intel_dig_port->base;
2832 	struct drm_device *dev = encoder->base.dev;
2833 	struct drm_i915_private *dev_priv = dev->dev_private;
2834 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2835 
2836 	lockdep_assert_held(&dev_priv->pps_mutex);
2837 
2838 	if (!is_edp(intel_dp))
2839 		return;
2840 
2841 	if (intel_dp->pps_pipe == crtc->pipe)
2842 		return;
2843 
2844 	/*
2845 	 * If another power sequencer was being used on this
2846 	 * port previously make sure to turn off vdd there while
2847 	 * we still have control of it.
2848 	 */
2849 	if (intel_dp->pps_pipe != INVALID_PIPE)
2850 		vlv_detach_power_sequencer(intel_dp);
2851 
2852 	/*
2853 	 * We may be stealing the power
2854 	 * sequencer from another port.
2855 	 */
2856 	vlv_steal_power_sequencer(dev, crtc->pipe);
2857 
2858 	/* now it's all ours */
2859 	intel_dp->pps_pipe = crtc->pipe;
2860 
2861 	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2862 		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2863 
2864 	/* init power sequencer on this pipe and port */
2865 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2866 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2867 }
2868 
2869 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2870 {
2871 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2872 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2873 	struct drm_device *dev = encoder->base.dev;
2874 	struct drm_i915_private *dev_priv = dev->dev_private;
2875 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2876 	enum dpio_channel port = vlv_dport_to_channel(dport);
2877 	int pipe = intel_crtc->pipe;
2878 	u32 val;
2879 
2880 	mutex_lock(&dev_priv->sb_lock);
2881 
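	/* Note: the DW8 read below is discarded; val is rebuilt from zero. */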
2882 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2883 	val = 0;
2884 	if (pipe)
2885 		val |= (1<<21);
2886 	else
2887 		val &= ~(1<<21);
2888 	val |= 0x001000c4;
2889 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2890 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2891 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2892 
2893 	mutex_unlock(&dev_priv->sb_lock);
2894 
2895 	intel_enable_dp(encoder);
2896 }
2897 
2898 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2899 {
2900 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2901 	struct drm_device *dev = encoder->base.dev;
2902 	struct drm_i915_private *dev_priv = dev->dev_private;
2903 	struct intel_crtc *intel_crtc =
2904 		to_intel_crtc(encoder->base.crtc);
2905 	enum dpio_channel port = vlv_dport_to_channel(dport);
2906 	int pipe = intel_crtc->pipe;
2907 
2908 	intel_dp_prepare(encoder);
2909 
2910 	/* Program Tx lane resets to default */
2911 	mutex_lock(&dev_priv->sb_lock);
2912 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2913 			 DPIO_PCS_TX_LANE2_RESET |
2914 			 DPIO_PCS_TX_LANE1_RESET);
2915 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2916 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2917 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2918 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2919 				 DPIO_PCS_CLK_SOFT_RESET);
2920 
2921 	/* Fix up inter-pair skew failure */
2922 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2923 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2924 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2925 	mutex_unlock(&dev_priv->sb_lock);
2926 }
2927 
2928 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2929 {
2930 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2931 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2932 	struct drm_device *dev = encoder->base.dev;
2933 	struct drm_i915_private *dev_priv = dev->dev_private;
2934 	struct intel_crtc *intel_crtc =
2935 		to_intel_crtc(encoder->base.crtc);
2936 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2937 	int pipe = intel_crtc->pipe;
2938 	int data, i, stagger;
2939 	u32 val;
2940 
2941 	mutex_lock(&dev_priv->sb_lock);
2942 
2943 	/* allow hardware to manage TX FIFO reset source */
2944 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2945 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2946 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2947 
2948 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2949 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2950 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2951 
2952 	/* Deassert soft data lane reset */
2953 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2954 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2955 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2956 
2957 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2958 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2959 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2960 
2961 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2962 	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2963 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2964 
2965 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2966 	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2967 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2968 
2969 	/* Program Tx lane latency optimal setting */
2970 	for (i = 0; i < 4; i++) {
2971 		/* Set the upar bit */
2972 		data = (i == 1) ? 0x0 : 0x1;
2973 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2974 				data << DPIO_UPAR_SHIFT);
2975 	}
2976 
2977 	/* Data lane stagger programming */
2978 	if (intel_crtc->config->port_clock > 270000)
2979 		stagger = 0x18;
2980 	else if (intel_crtc->config->port_clock > 135000)
2981 		stagger = 0xd;
2982 	else if (intel_crtc->config->port_clock > 67500)
2983 		stagger = 0x7;
2984 	else if (intel_crtc->config->port_clock > 33750)
2985 		stagger = 0x4;
2986 	else
2987 		stagger = 0x2;
2988 
2989 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2990 	val |= DPIO_TX2_STAGGER_MASK(0x1f);
2991 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2992 
2993 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2994 	val |= DPIO_TX2_STAGGER_MASK(0x1f);
2995 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2996 
2997 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2998 		       DPIO_LANESTAGGER_STRAP(stagger) |
2999 		       DPIO_LANESTAGGER_STRAP_OVRD |
3000 		       DPIO_TX1_STAGGER_MASK(0x1f) |
3001 		       DPIO_TX1_STAGGER_MULT(6) |
3002 		       DPIO_TX2_STAGGER_MULT(0));
3003 
3004 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3005 		       DPIO_LANESTAGGER_STRAP(stagger) |
3006 		       DPIO_LANESTAGGER_STRAP_OVRD |
3007 		       DPIO_TX1_STAGGER_MASK(0x1f) |
3008 		       DPIO_TX1_STAGGER_MULT(7) |
3009 		       DPIO_TX2_STAGGER_MULT(5));
3010 
3011 	mutex_unlock(&dev_priv->sb_lock);
3012 
3013 	intel_enable_dp(encoder);
3014 }
3015 
3016 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3017 {
3018 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3019 	struct drm_device *dev = encoder->base.dev;
3020 	struct drm_i915_private *dev_priv = dev->dev_private;
3021 	struct intel_crtc *intel_crtc =
3022 		to_intel_crtc(encoder->base.crtc);
3023 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3024 	enum i915_pipe pipe = intel_crtc->pipe;
3025 	u32 val;
3026 
3027 	intel_dp_prepare(encoder);
3028 
3029 	mutex_lock(&dev_priv->sb_lock);
3030 
3031 	/* program left/right clock distribution */
3032 	if (pipe != PIPE_B) {
3033 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3034 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3035 		if (ch == DPIO_CH0)
3036 			val |= CHV_BUFLEFTENA1_FORCE;
3037 		if (ch == DPIO_CH1)
3038 			val |= CHV_BUFRIGHTENA1_FORCE;
3039 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3040 	} else {
3041 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3042 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3043 		if (ch == DPIO_CH0)
3044 			val |= CHV_BUFLEFTENA2_FORCE;
3045 		if (ch == DPIO_CH1)
3046 			val |= CHV_BUFRIGHTENA2_FORCE;
3047 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3048 	}
3049 
3050 	/* program clock channel usage */
3051 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3052 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3053 	if (pipe != PIPE_B)
3054 		val &= ~CHV_PCS_USEDCLKCHANNEL;
3055 	else
3056 		val |= CHV_PCS_USEDCLKCHANNEL;
3057 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3058 
3059 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3060 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3061 	if (pipe != PIPE_B)
3062 		val &= ~CHV_PCS_USEDCLKCHANNEL;
3063 	else
3064 		val |= CHV_PCS_USEDCLKCHANNEL;
3065 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3066 
3067 	/*
3068 	 * This is a bit weird since generally CL
3069 	 * matches the pipe, but here we need to
3070 	 * pick the CL based on the port.
3071 	 */
3072 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3073 	if (pipe != PIPE_B)
3074 		val &= ~CHV_CMN_USEDCLKCHANNEL;
3075 	else
3076 		val |= CHV_CMN_USEDCLKCHANNEL;
3077 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3078 
3079 	mutex_unlock(&dev_priv->sb_lock);
3080 }
3081 
3082 /*
3083  * Native read with retry for link status and receiver capability reads for
3084  * cases where the sink may still be asleep.
3085  *
3086  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3087  * supposed to retry 3 times per the spec.
3088  */
3089 static ssize_t
3090 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3091 			void *buffer, size_t size)
3092 {
3093 	ssize_t ret;
3094 	int i;
3095 
3096 	/*
3097 	 * Sometimes we just get the same incorrect byte repeated
3098 	 * over the entire buffer. Doing just one throw-away read
3099 	 * initially seems to "solve" it.
3100 	 */
3101 	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3102 
3103 	for (i = 0; i < 3; i++) {
3104 		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3105 		if (ret == size)
3106 			return ret;
3107 		msleep(1);
3108 	}
3109 
3110 	return ret;
3111 }
3112 
3113 /*
3114  * Fetch AUX CH registers 0x202 - 0x207 which contain
3115  * link status information
3116  */
3117 static bool
3118 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3119 {
3120 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3121 				       DP_LANE0_1_STATUS,
3122 				       link_status,
3123 				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3124 }
3125 
3126 /* These are source-specific values. */
3127 static uint8_t
3128 intel_dp_voltage_max(struct intel_dp *intel_dp)
3129 {
3130 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3131 	struct drm_i915_private *dev_priv = dev->dev_private;
3132 	enum port port = dp_to_dig_port(intel_dp)->port;
3133 
3134 	if (IS_BROXTON(dev))
3135 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3136 	else if (INTEL_INFO(dev)->gen >= 9) {
3137 		if (dev_priv->edp_low_vswing && port == PORT_A)
3138 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3139 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3140 	} else if (IS_VALLEYVIEW(dev))
3141 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3142 	else if (IS_GEN7(dev) && port == PORT_A)
3143 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3144 	else if (HAS_PCH_CPT(dev) && port != PORT_A)
3145 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3146 	else
3147 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3148 }
3149 
3150 static uint8_t
3151 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3152 {
3153 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3154 	enum port port = dp_to_dig_port(intel_dp)->port;
3155 
3156 	if (INTEL_INFO(dev)->gen >= 9) {
3157 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3158 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3159 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3160 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3161 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3162 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3163 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3164 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3165 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3166 		default:
3167 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3168 		}
3169 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3170 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3171 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3172 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3173 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3174 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3175 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3176 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3177 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3178 		default:
3179 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3180 		}
3181 	} else if (IS_VALLEYVIEW(dev)) {
3182 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3183 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3184 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3185 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3186 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3187 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3188 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3189 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3190 		default:
3191 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3192 		}
3193 	} else if (IS_GEN7(dev) && port == PORT_A) {
3194 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3195 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3196 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3197 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3198 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3199 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3200 		default:
3201 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3202 		}
3203 	} else {
3204 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3205 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3206 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3207 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3208 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3209 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3210 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3211 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3212 		default:
3213 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3214 		}
3215 	}
3216 }
3217 
3218 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3219 {
3220 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3221 	struct drm_i915_private *dev_priv = dev->dev_private;
3222 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3223 	struct intel_crtc *intel_crtc =
3224 		to_intel_crtc(dport->base.base.crtc);
3225 	unsigned long demph_reg_value, preemph_reg_value,
3226 		uniqtranscale_reg_value;
3227 	uint8_t train_set = intel_dp->train_set[0];
3228 	enum dpio_channel port = vlv_dport_to_channel(dport);
3229 	int pipe = intel_crtc->pipe;
3230 
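	/*
	 * The demph/uniqtranscale values below are opaque PHY tuning
	 * constants, one set per pre-emphasis/voltage-swing combination.
	 */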
3231 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3232 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3233 		preemph_reg_value = 0x0004000;
3234 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3235 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3236 			demph_reg_value = 0x2B405555;
3237 			uniqtranscale_reg_value = 0x552AB83A;
3238 			break;
3239 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3240 			demph_reg_value = 0x2B404040;
3241 			uniqtranscale_reg_value = 0x5548B83A;
3242 			break;
3243 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3244 			demph_reg_value = 0x2B245555;
3245 			uniqtranscale_reg_value = 0x5560B83A;
3246 			break;
3247 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3248 			demph_reg_value = 0x2B405555;
3249 			uniqtranscale_reg_value = 0x5598DA3A;
3250 			break;
3251 		default:
3252 			return 0;
3253 		}
3254 		break;
3255 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3256 		preemph_reg_value = 0x0002000;
3257 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3258 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3259 			demph_reg_value = 0x2B404040;
3260 			uniqtranscale_reg_value = 0x5552B83A;
3261 			break;
3262 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3263 			demph_reg_value = 0x2B404848;
3264 			uniqtranscale_reg_value = 0x5580B83A;
3265 			break;
3266 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3267 			demph_reg_value = 0x2B404040;
3268 			uniqtranscale_reg_value = 0x55ADDA3A;
3269 			break;
3270 		default:
3271 			return 0;
3272 		}
3273 		break;
3274 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3275 		preemph_reg_value = 0x0000000;
3276 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3277 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3278 			demph_reg_value = 0x2B305555;
3279 			uniqtranscale_reg_value = 0x5570B83A;
3280 			break;
3281 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3282 			demph_reg_value = 0x2B2B4040;
3283 			uniqtranscale_reg_value = 0x55ADDA3A;
3284 			break;
3285 		default:
3286 			return 0;
3287 		}
3288 		break;
3289 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3290 		preemph_reg_value = 0x0006000;
3291 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3292 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3293 			demph_reg_value = 0x1B405555;
3294 			uniqtranscale_reg_value = 0x55ADDA3A;
3295 			break;
3296 		default:
3297 			return 0;
3298 		}
3299 		break;
3300 	default:
3301 		return 0;
3302 	}
3303 
3304 	mutex_lock(&dev_priv->sb_lock);
3305 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3306 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3307 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3308 			 uniqtranscale_reg_value);
3309 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3310 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3311 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3312 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3313 	mutex_unlock(&dev_priv->sb_lock);
3314 
3315 	return 0;
3316 }
3317 
3318 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3319 {
3320 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3321 	struct drm_i915_private *dev_priv = dev->dev_private;
3322 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3323 	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3324 	u32 deemph_reg_value, margin_reg_value, val;
3325 	uint8_t train_set = intel_dp->train_set[0];
3326 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3327 	enum i915_pipe pipe = intel_crtc->pipe;
3328 	int i;
3329 
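	/*
	 * The deemph/margin pairs below are CHV PHY drive settings, one
	 * pair per pre-emphasis/voltage-swing combination.
	 */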
3330 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3331 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3332 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3333 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3334 			deemph_reg_value = 128;
3335 			margin_reg_value = 52;
3336 			break;
3337 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3338 			deemph_reg_value = 128;
3339 			margin_reg_value = 77;
3340 			break;
3341 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3342 			deemph_reg_value = 128;
3343 			margin_reg_value = 102;
3344 			break;
3345 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3346 			deemph_reg_value = 128;
3347 			margin_reg_value = 154;
3348 			/* FIXME extra to set for 1200 */
3349 			break;
3350 		default:
3351 			return 0;
3352 		}
3353 		break;
3354 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3355 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3356 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3357 			deemph_reg_value = 85;
3358 			margin_reg_value = 78;
3359 			break;
3360 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3361 			deemph_reg_value = 85;
3362 			margin_reg_value = 116;
3363 			break;
3364 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3365 			deemph_reg_value = 85;
3366 			margin_reg_value = 154;
3367 			break;
3368 		default:
3369 			return 0;
3370 		}
3371 		break;
3372 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3373 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3374 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3375 			deemph_reg_value = 64;
3376 			margin_reg_value = 104;
3377 			break;
3378 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3379 			deemph_reg_value = 64;
3380 			margin_reg_value = 154;
3381 			break;
3382 		default:
3383 			return 0;
3384 		}
3385 		break;
3386 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3387 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3388 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3389 			deemph_reg_value = 43;
3390 			margin_reg_value = 154;
3391 			break;
3392 		default:
3393 			return 0;
3394 		}
3395 		break;
3396 	default:
3397 		return 0;
3398 	}
3399 
3400 	mutex_lock(&dev_priv->sb_lock);
3401 
3402 	/* Clear calc init */
3403 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3404 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3405 	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3406 	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3407 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3408 
3409 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3410 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3411 	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3412 	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3413 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3414 
3415 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3416 	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3417 	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3418 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3419 
3420 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3421 	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3422 	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3423 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3424 
3425 	/* Program swing deemph */
3426 	for (i = 0; i < 4; i++) {
3427 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3428 		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3429 		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3430 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3431 	}
3432 
3433 	/* Program swing margin */
3434 	for (i = 0; i < 4; i++) {
3435 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3436 		val &= ~DPIO_SWING_MARGIN000_MASK;
3437 		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3438 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3439 	}
3440 
3441 	/* Disable unique transition scale */
3442 	for (i = 0; i < 4; i++) {
3443 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3444 		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3445 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3446 	}
3447 
3448 	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3449 			== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3450 		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3451 			== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3452 
3453 		/*
3454 		 * The documentation says to set bit 27 for ch0 and bit 26 for
3455 		 * ch1, which might be a typo in the doc.
3456 		 * For now, for this unique transition scale selection, set bit
3457 		 * 27 for both ch0 and ch1.
3458 		 */
3459 		for (i = 0; i < 4; i++) {
3460 			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3461 			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3462 			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3463 		}
3464 
3465 		for (i = 0; i < 4; i++) {
3466 			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3467 			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3468 			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3469 			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3470 		}
3471 	}
3472 
3473 	/* Start swing calculation */
3474 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3475 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3476 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3477 
3478 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3479 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3480 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3481 
3482 	/* LRC Bypass */
3483 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3484 	val |= DPIO_LRC_BYPASS;
3485 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3486 
3487 	mutex_unlock(&dev_priv->sb_lock);
3488 
3489 	return 0;
3490 }
3491 
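/*
 * Compute the next train_set from the sink's per-lane adjust requests:
 * take the highest voltage swing and pre-emphasis any lane asked for,
 * clamp both to what this platform can drive, and set the
 * DP_TRAIN_MAX_*_REACHED flags when the clamp kicks in so the sink
 * stops requesting more. For example, a request for swing level 2 on a
 * source whose maximum is level 2 yields
 * DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_MAX_SWING_REACHED.
 */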
3492 static void
3493 intel_get_adjust_train(struct intel_dp *intel_dp,
3494 		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
3495 {
3496 	uint8_t v = 0;
3497 	uint8_t p = 0;
3498 	int lane;
3499 	uint8_t voltage_max;
3500 	uint8_t preemph_max;
3501 
3502 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
3503 		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3504 		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3505 
3506 		if (this_v > v)
3507 			v = this_v;
3508 		if (this_p > p)
3509 			p = this_p;
3510 	}
3511 
3512 	voltage_max = intel_dp_voltage_max(intel_dp);
3513 	if (v >= voltage_max)
3514 		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3515 
3516 	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3517 	if (p >= preemph_max)
3518 		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3519 
3520 	for (lane = 0; lane < 4; lane++)
3521 		intel_dp->train_set[lane] = v | p;
3522 }
3523 
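/*
 * Translate the DPCD train_set byte into gen4 DP port register bits.
 * Swing and pre-emphasis map independently; for example train_set =
 * DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1 yields
 * DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_3_5.
 */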
3524 static uint32_t
3525 gen4_signal_levels(uint8_t train_set)
3526 {
3527 	uint32_t	signal_levels = 0;
3528 
3529 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3530 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3531 	default:
3532 		signal_levels |= DP_VOLTAGE_0_4;
3533 		break;
3534 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3535 		signal_levels |= DP_VOLTAGE_0_6;
3536 		break;
3537 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3538 		signal_levels |= DP_VOLTAGE_0_8;
3539 		break;
3540 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3541 		signal_levels |= DP_VOLTAGE_1_2;
3542 		break;
3543 	}
3544 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3545 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3546 	default:
3547 		signal_levels |= DP_PRE_EMPHASIS_0;
3548 		break;
3549 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3550 		signal_levels |= DP_PRE_EMPHASIS_3_5;
3551 		break;
3552 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3553 		signal_levels |= DP_PRE_EMPHASIS_6;
3554 		break;
3555 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3556 		signal_levels |= DP_PRE_EMPHASIS_9_5;
3557 		break;
3558 	}
3559 	return signal_levels;
3560 }
3561 
3562 /* Gen6's DP voltage swing and pre-emphasis control */
3563 static uint32_t
3564 gen6_edp_signal_levels(uint8_t train_set)
3565 {
3566 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3567 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3568 	switch (signal_levels) {
3569 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3570 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3571 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3572 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3573 		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3574 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3575 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3576 		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3577 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3578 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3579 		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3580 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3581 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3582 		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3583 	default:
3584 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3585 			      "0x%x\n", signal_levels);
3586 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3587 	}
3588 }
3589 
3590 /* Gen7's DP voltage swing and pre-emphasis control */
3591 static uint32_t
3592 gen7_edp_signal_levels(uint8_t train_set)
3593 {
3594 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3595 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3596 	switch (signal_levels) {
3597 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3598 		return EDP_LINK_TRAIN_400MV_0DB_IVB;
3599 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3600 		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3601 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3602 		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3603 
3604 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3605 		return EDP_LINK_TRAIN_600MV_0DB_IVB;
3606 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3607 		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3608 
3609 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3610 		return EDP_LINK_TRAIN_800MV_0DB_IVB;
3611 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3612 		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3613 
3614 	default:
3615 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3616 			      "0x%x\n", signal_levels);
3617 		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3618 	}
3619 }
3620 
3621 /* Properly updates "DP" with the correct signal levels. */
3622 static void
3623 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3624 {
3625 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3626 	enum port port = intel_dig_port->port;
3627 	struct drm_device *dev = intel_dig_port->base.base.dev;
3628 	uint32_t signal_levels, mask = 0;
3629 	uint8_t train_set = intel_dp->train_set[0];
3630 
3631 	if (HAS_DDI(dev)) {
3632 		signal_levels = ddi_signal_levels(intel_dp);
3633 
3634 		if (IS_BROXTON(dev))
3635 			signal_levels = 0;
3636 		else
3637 			mask = DDI_BUF_EMP_MASK;
3638 	} else if (IS_CHERRYVIEW(dev)) {
3639 		signal_levels = chv_signal_levels(intel_dp);
3640 	} else if (IS_VALLEYVIEW(dev)) {
3641 		signal_levels = vlv_signal_levels(intel_dp);
3642 	} else if (IS_GEN7(dev) && port == PORT_A) {
3643 		signal_levels = gen7_edp_signal_levels(train_set);
3644 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3645 	} else if (IS_GEN6(dev) && port == PORT_A) {
3646 		signal_levels = gen6_edp_signal_levels(train_set);
3647 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3648 	} else {
3649 		signal_levels = gen4_signal_levels(train_set);
3650 		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3651 	}
3652 
3653 	if (mask)
3654 		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3655 
3656 	DRM_DEBUG_KMS("Using vswing level %d\n",
3657 		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3658 	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3659 		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3660 			DP_TRAIN_PRE_EMPHASIS_SHIFT);
3661 
3662 	*DP = (*DP & ~mask) | signal_levels;
3663 }
3664 
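/*
 * Program the port register with the requested training pattern, then
 * mirror it to the sink over AUX: DP_TRAINING_PATTERN_SET and the
 * per-lane DP_TRAINING_LANEx_SET bytes are contiguous in the DPCD, so
 * they go out as a single write. Returns true only if the sink acked
 * the whole transfer.
 */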
3665 static bool
3666 intel_dp_set_link_train(struct intel_dp *intel_dp,
3667 			uint32_t *DP,
3668 			uint8_t dp_train_pat)
3669 {
3670 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3671 	struct drm_device *dev = intel_dig_port->base.base.dev;
3672 	struct drm_i915_private *dev_priv = dev->dev_private;
3673 	uint8_t buf[sizeof(intel_dp->train_set) + 1];
3674 	int ret, len;
3675 
3676 	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3677 
3678 	I915_WRITE(intel_dp->output_reg, *DP);
3679 	POSTING_READ(intel_dp->output_reg);
3680 
3681 	buf[0] = dp_train_pat;
3682 	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3683 	    DP_TRAINING_PATTERN_DISABLE) {
3684 		/* don't write DP_TRAINING_LANEx_SET on disable */
3685 		len = 1;
3686 	} else {
3687 		/* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3688 		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3689 		len = intel_dp->lane_count + 1;
3690 	}
3691 
3692 	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3693 				buf, len);
3694 
3695 	return ret == len;
3696 }
3697 
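/*
 * (Re)start training from a clean slate: zero the cached train_set
 * (unless a previously successful one is being reused), reprogram the
 * signal levels accordingly and set the requested pattern.
 */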
3698 static bool
3699 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3700 			uint8_t dp_train_pat)
3701 {
3702 	if (!intel_dp->train_set_valid)
3703 		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3704 	intel_dp_set_signal_levels(intel_dp, DP);
3705 	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3706 }
3707 
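/*
 * One adjustment step of link training: recompute train_set from the
 * sink's requests in link_status, apply the new signal levels to the
 * port and push the per-lane values to DP_TRAINING_LANE0_SET onwards.
 */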
3708 static bool
3709 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3710 			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
3711 {
3712 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3713 	struct drm_device *dev = intel_dig_port->base.base.dev;
3714 	struct drm_i915_private *dev_priv = dev->dev_private;
3715 	int ret;
3716 
3717 	intel_get_adjust_train(intel_dp, link_status);
3718 	intel_dp_set_signal_levels(intel_dp, DP);
3719 
3720 	I915_WRITE(intel_dp->output_reg, *DP);
3721 	POSTING_READ(intel_dp->output_reg);
3722 
3723 	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3724 				intel_dp->train_set, intel_dp->lane_count);
3725 
3726 	return ret == intel_dp->lane_count;
3727 }
3728 
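/*
 * On DDI platforms, switch DP_TP_CTL to the idle pattern once training
 * is done and, except on PORT_A, wait for DP_TP_STATUS to confirm that
 * the minimum number of idle patterns has been sent.
 */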
3729 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3730 {
3731 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3732 	struct drm_device *dev = intel_dig_port->base.base.dev;
3733 	struct drm_i915_private *dev_priv = dev->dev_private;
3734 	enum port port = intel_dig_port->port;
3735 	uint32_t val;
3736 
3737 	if (!HAS_DDI(dev))
3738 		return;
3739 
3740 	val = I915_READ(DP_TP_CTL(port));
3741 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3742 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3743 	I915_WRITE(DP_TP_CTL(port), val);
3744 
3745 	/*
3746 	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3747 	 * we need to set idle transmission mode is to work around a HW issue
3748 	 * where we enable the pipe while not in idle link-training mode.
3749 	 * In this case there is a requirement to wait for a minimum number of
3750 	 * idle patterns to be sent.
3751 	 */
3752 	if (port == PORT_A)
3753 		return;
3754 
3755 	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3756 		     1))
3757 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3758 }
3759 
3760 /* Enable corresponding port and start training pattern 1 */
3761 void
3762 intel_dp_start_link_train(struct intel_dp *intel_dp)
3763 {
3764 	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3765 	struct drm_device *dev = encoder->dev;
3766 	int i;
3767 	uint8_t voltage;
3768 	int voltage_tries, loop_tries;
3769 	uint32_t DP = intel_dp->DP;
3770 	uint8_t link_config[2];
3771 
3772 	if (HAS_DDI(dev))
3773 		intel_ddi_prepare_link_retrain(encoder);
3774 
3775 	/* Write the link configuration data */
3776 	link_config[0] = intel_dp->link_bw;
3777 	link_config[1] = intel_dp->lane_count;
3778 	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3779 		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3780 	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3781 	if (intel_dp->num_sink_rates)
3782 		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3783 				&intel_dp->rate_select, 1);
3784 
3785 	link_config[0] = 0;
3786 	link_config[1] = DP_SET_ANSI_8B10B;
3787 	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3788 
3789 	DP |= DP_PORT_EN;
3790 
3791 	/* clock recovery */
3792 	if (!intel_dp_reset_link_train(intel_dp, &DP,
3793 				       DP_TRAINING_PATTERN_1 |
3794 				       DP_LINK_SCRAMBLING_DISABLE)) {
3795 		DRM_ERROR("failed to enable link training\n");
3796 		return;
3797 	}
3798 
3799 	voltage = 0xff;
3800 	voltage_tries = 0;
3801 	loop_tries = 0;
3802 	for (;;) {
3803 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3804 
3805 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3806 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3807 			DRM_ERROR("failed to get link status\n");
3808 			break;
3809 		}
3810 
3811 		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3812 			DRM_DEBUG_KMS("clock recovery OK\n");
3813 			break;
3814 		}
3815 
3816 		/*
3817 		 * if we used previously trained voltage and pre-emphasis values
3818 		 * and we don't get clock recovery, reset link training values
3819 		 */
3820 		if (intel_dp->train_set_valid) {
3821 			DRM_DEBUG_KMS("clock recovery not ok, reset\n");
3822 			/* clear the flag as we are not reusing train set */
3823 			intel_dp->train_set_valid = false;
3824 			if (!intel_dp_reset_link_train(intel_dp, &DP,
3825 						       DP_TRAINING_PATTERN_1 |
3826 						       DP_LINK_SCRAMBLING_DISABLE)) {
3827 				DRM_ERROR("failed to enable link training\n");
3828 				return;
3829 			}
3830 			continue;
3831 		}
3832 
3833 		/* Check to see if we've tried the max voltage */
3834 		for (i = 0; i < intel_dp->lane_count; i++)
3835 			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3836 				break;
3837 		if (i == intel_dp->lane_count) {
3838 			++loop_tries;
3839 			if (loop_tries == 5) {
3840 				DRM_ERROR("too many full retries, give up\n");
3841 				break;
3842 			}
3843 			intel_dp_reset_link_train(intel_dp, &DP,
3844 						  DP_TRAINING_PATTERN_1 |
3845 						  DP_LINK_SCRAMBLING_DISABLE);
3846 			voltage_tries = 0;
3847 			continue;
3848 		}
3849 
3850 		/* Check to see if we've tried the same voltage 5 times */
3851 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3852 			++voltage_tries;
3853 			if (voltage_tries == 5) {
3854 				DRM_ERROR("too many voltage retries, give up\n");
3855 				break;
3856 			}
3857 		} else
3858 			voltage_tries = 0;
3859 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3860 
3861 		/* Update training set as requested by target */
3862 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3863 			DRM_ERROR("failed to update link training\n");
3864 			break;
3865 		}
3866 	}
3867 
3868 	intel_dp->DP = DP;
3869 }
3870 
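/*
 * Channel equalization, the second phase of link training: loop on
 * TPS2 (or TPS3 for HBR2/TPS3-capable sinks) until
 * drm_dp_channel_eq_ok(), restarting clock recovery from scratch if the
 * clock drops or after several fruitless adjustment rounds, and
 * aborting entirely after too many such restarts.
 */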
3871 void
3872 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3873 {
3874 	bool channel_eq = false;
3875 	int tries, cr_tries;
3876 	uint32_t DP = intel_dp->DP;
3877 	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3878 
3879 	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3880 	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3881 		training_pattern = DP_TRAINING_PATTERN_3;
3882 
3883 	/* channel equalization */
3884 	if (!intel_dp_set_link_train(intel_dp, &DP,
3885 				     training_pattern |
3886 				     DP_LINK_SCRAMBLING_DISABLE)) {
3887 		DRM_ERROR("failed to start channel equalization\n");
3888 		return;
3889 	}
3890 
3891 	tries = 0;
3892 	cr_tries = 0;
3893 	channel_eq = false;
3894 	for (;;) {
3895 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3896 
3897 		if (cr_tries > 5) {
3898 			DRM_ERROR("failed to train DP, aborting\n");
3899 			break;
3900 		}
3901 
3902 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3903 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3904 			DRM_ERROR("failed to get link status\n");
3905 			break;
3906 		}
3907 
3908 		/* Make sure clock is still ok */
3909 		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3910 			intel_dp->train_set_valid = false;
3911 			intel_dp_start_link_train(intel_dp);
3912 			intel_dp_set_link_train(intel_dp, &DP,
3913 						training_pattern |
3914 						DP_LINK_SCRAMBLING_DISABLE);
3915 			cr_tries++;
3916 			continue;
3917 		}
3918 
3919 		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3920 			channel_eq = true;
3921 			break;
3922 		}
3923 
3924 		/* Try 5 times, then try clock recovery if that fails */
3925 		if (tries > 5) {
3926 			intel_dp->train_set_valid = false;
3927 			intel_dp_start_link_train(intel_dp);
3928 			intel_dp_set_link_train(intel_dp, &DP,
3929 						training_pattern |
3930 						DP_LINK_SCRAMBLING_DISABLE);
3931 			tries = 0;
3932 			cr_tries++;
3933 			continue;
3934 		}
3935 
3936 		/* Update training set as requested by target */
3937 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3938 			DRM_ERROR("failed to update link training\n");
3939 			break;
3940 		}
3941 		++tries;
3942 	}
3943 
3944 	intel_dp_set_idle_link_train(intel_dp);
3945 
3946 	intel_dp->DP = DP;
3947 
3948 	if (channel_eq) {
3949 		intel_dp->train_set_valid = true;
3950 		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3951 	}
3952 }
3953 
3954 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3955 {
3956 	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3957 				DP_TRAINING_PATTERN_DISABLE);
3958 }
3959 
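/*
 * Tear the link down on pre-DDI platforms: step the port through the
 * idle training pattern, clear DP_PORT_EN and audio, apply the IBX
 * transcoder-A workaround where needed and wait out the panel
 * power-down delay.
 */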
3960 static void
3961 intel_dp_link_down(struct intel_dp *intel_dp)
3962 {
3963 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3964 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3965 	enum port port = intel_dig_port->port;
3966 	struct drm_device *dev = intel_dig_port->base.base.dev;
3967 	struct drm_i915_private *dev_priv = dev->dev_private;
3968 	uint32_t DP = intel_dp->DP;
3969 
3970 	if (WARN_ON(HAS_DDI(dev)))
3971 		return;
3972 
3973 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3974 		return;
3975 
3976 	DRM_DEBUG_KMS("\n");
3977 
3978 	if ((IS_GEN7(dev) && port == PORT_A) ||
3979 	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
3980 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3981 		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3982 	} else {
3983 		if (IS_CHERRYVIEW(dev))
3984 			DP &= ~DP_LINK_TRAIN_MASK_CHV;
3985 		else
3986 			DP &= ~DP_LINK_TRAIN_MASK;
3987 		DP |= DP_LINK_TRAIN_PAT_IDLE;
3988 	}
3989 	I915_WRITE(intel_dp->output_reg, DP);
3990 	POSTING_READ(intel_dp->output_reg);
3991 
3992 	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3993 	I915_WRITE(intel_dp->output_reg, DP);
3994 	POSTING_READ(intel_dp->output_reg);
3995 
3996 	/*
3997 	 * HW workaround for IBX, we need to move the port
3998 	 * to transcoder A after disabling it to allow the
3999 	 * matching HDMI port to be enabled on transcoder A.
4000 	 */
4001 	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
4002 		/* always enable with pattern 1 (as per spec) */
4003 		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
4004 		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
4005 		I915_WRITE(intel_dp->output_reg, DP);
4006 		POSTING_READ(intel_dp->output_reg);
4007 
4008 		DP &= ~DP_PORT_EN;
4009 		I915_WRITE(intel_dp->output_reg, DP);
4010 		POSTING_READ(intel_dp->output_reg);
4011 	}
4012 
4013 	msleep(intel_dp->panel_power_down_delay);
4014 }
4015 
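/*
 * Read and cache the sink's DPCD receiver capabilities, along with the
 * eDP PSR/PSR2 caps, TP3 usability, any eDP 1.4 intermediate link rates
 * and the downstream port info for branch devices. Returns false if the
 * AUX read fails or no DPCD is present.
 */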
4016 static bool
4017 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4018 {
4019 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4020 	struct drm_device *dev = dig_port->base.base.dev;
4021 	struct drm_i915_private *dev_priv = dev->dev_private;
4022 	uint8_t rev;
4023 
4024 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
4025 				    sizeof(intel_dp->dpcd)) < 0)
4026 		return false; /* aux transfer failed */
4027 
4028 #ifdef __DragonFly__
4029 	char dpcd_hex_dump[DP_RECEIVER_CAP_SIZE * 3];
4030 	DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(intel_dp->dpcd, sizeof(intel_dp->dpcd),
4031 		      dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
4032 #else
4033 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4034 #endif
4035 
4036 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
4037 		return false; /* DPCD not present */
4038 
4039 	/* Check if the panel supports PSR */
4040 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
4041 	if (is_edp(intel_dp)) {
4042 		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
4043 					intel_dp->psr_dpcd,
4044 					sizeof(intel_dp->psr_dpcd));
4045 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
4046 			dev_priv->psr.sink_support = true;
4047 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
4048 		}
4049 
4050 		if (INTEL_INFO(dev)->gen >= 9 &&
4051 			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
4052 			uint8_t frame_sync_cap;
4053 
4054 			dev_priv->psr.sink_support = true;
4055 			intel_dp_dpcd_read_wake(&intel_dp->aux,
4056 					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
4057 					&frame_sync_cap, 1);
4058 			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
4059 			/* PSR2 needs frame sync as well */
4060 			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
4061 			DRM_DEBUG_KMS("PSR2 %s on sink\n",
4062 				dev_priv->psr.psr2_support ? "supported" : "not supported");
4063 		}
4064 	}
4065 
4066 	/* Training Pattern 3 support: only Intel platforms that support HBR2
4067 	 * have support for TP3, hence that source check is used along with the
4068 	 * DPCD check to ensure TP3 can be enabled.
4069 	 * SKL < B0 is the only exception, where TP3 is supported but still not
4070 	 * enabled due to WaDisableHBR2.
4071 	 */
4072 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
4073 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
4074 	    intel_dp_source_supports_hbr2(dev)) {
4075 		intel_dp->use_tps3 = true;
4076 		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
4077 	} else
4078 		intel_dp->use_tps3 = false;
4079 
4080 	/* Intermediate frequency support */
4081 	if (is_edp(intel_dp) &&
4082 	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4083 	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
4084 	    (rev >= 0x03)) { /* eDP v1.4 or higher */
4085 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4086 		int i;
4087 
4088 		intel_dp_dpcd_read_wake(&intel_dp->aux,
4089 				DP_SUPPORTED_LINK_RATES,
4090 				sink_rates,
4091 				sizeof(sink_rates));
4092 
4093 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4094 			int val = le16_to_cpu(sink_rates[i]);
4095 
4096 			if (val == 0)
4097 				break;
4098 
4099 			/* Value read is in units of 200 kHz while the drm clock is saved in deca-kHz */
4100 			intel_dp->sink_rates[i] = (val * 200) / 10;
4101 		}
4102 		intel_dp->num_sink_rates = i;
4103 	}
4104 
4105 	intel_dp_print_rates(intel_dp);
4106 
4107 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4108 	      DP_DWN_STRM_PORT_PRESENT))
4109 		return true; /* native DP sink */
4110 
4111 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4112 		return true; /* no per-port downstream info */
4113 
4114 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4115 				    intel_dp->downstream_ports,
4116 				    DP_MAX_DOWNSTREAM_PORTS) < 0)
4117 		return false; /* downstream port status fetch failed */
4118 
4119 	return true;
4120 }
4121 
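/* Log the sink and branch IEEE OUIs, if the sink claims OUI support. */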
4122 static void
4123 intel_dp_probe_oui(struct intel_dp *intel_dp)
4124 {
4125 	u8 buf[3];
4126 
4127 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4128 		return;
4129 
4130 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4131 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4132 			      buf[0], buf[1], buf[2]);
4133 
4134 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4135 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4136 			      buf[0], buf[1], buf[2]);
4137 }
4138 
4139 static bool
4140 intel_dp_probe_mst(struct intel_dp *intel_dp)
4141 {
4142 	u8 buf[1];
4143 
4144 	if (!intel_dp->can_mst)
4145 		return false;
4146 
4147 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4148 		return false;
4149 
4150 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4151 		if (buf[0] & DP_MST_CAP) {
4152 			DRM_DEBUG_KMS("Sink is MST capable\n");
4153 			intel_dp->is_mst = true;
4154 		} else {
4155 			DRM_DEBUG_KMS("Sink is not MST capable\n");
4156 			intel_dp->is_mst = false;
4157 		}
4158 	}
4159 
4160 #if 0
4161 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4162 	return intel_dp->is_mst;
4163 #else
4164 	return false;
4165 #endif
4166 }
4167 
4168 static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4169 {
4170 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4171 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4172 	u8 buf;
4173 
4174 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4175 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4176 		return;
4177 	}
4178 
4179 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4180 			       buf & ~DP_TEST_SINK_START) < 0)
4181 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4182 
4183 	hsw_enable_ips(intel_crtc);
4184 }
4185 
4186 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4187 {
4188 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4189 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4190 	u8 buf;
4191 
4192 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4193 		return -EIO;
4194 
4195 	if (!(buf & DP_TEST_CRC_SUPPORTED))
4196 		return -ENOTTY;
4197 
4198 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4199 		return -EIO;
4200 
4201 	hsw_disable_ips(intel_crtc);
4202 
4203 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4204 			       buf | DP_TEST_SINK_START) < 0) {
4205 		hsw_enable_ips(intel_crtc);
4206 		return -EIO;
4207 	}
4208 
4209 	return 0;
4210 }
4211 
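/*
 * Fetch a sink-computed frame CRC (used for display testing): start CRC
 * generation via DP_TEST_SINK, wait up to 6 vblanks for the frame count
 * in DP_TEST_SINK_MISC to advance, then read the six CRC bytes starting
 * at DP_TEST_CRC_R_CR. IPS is kept disabled for the duration of the
 * measurement.
 */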
4212 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4213 {
4214 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4215 	struct drm_device *dev = dig_port->base.base.dev;
4216 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4217 	u8 buf;
4218 	int test_crc_count;
4219 	int attempts = 6;
4220 	int ret;
4221 
4222 	ret = intel_dp_sink_crc_start(intel_dp);
4223 	if (ret)
4224 		return ret;
4225 
4226 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4227 		ret = -EIO;
4228 		goto stop;
4229 	}
4230 
4231 	test_crc_count = buf & DP_TEST_COUNT_MASK;
4232 
4233 	do {
4234 		if (drm_dp_dpcd_readb(&intel_dp->aux,
4235 				      DP_TEST_SINK_MISC, &buf) < 0) {
4236 			ret = -EIO;
4237 			goto stop;
4238 		}
4239 		intel_wait_for_vblank(dev, intel_crtc->pipe);
4240 	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4241 
4242 	if (attempts == 0) {
4243 		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4244 		ret = -ETIMEDOUT;
4245 		goto stop;
4246 	}
4247 
4248 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
4249 		ret = -EIO;
4250 stop:
4251 	intel_dp_sink_crc_stop(intel_dp);
4252 	return ret;
4253 }
4254 
4255 static bool
4256 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4257 {
4258 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
4259 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
4260 				       sink_irq_vector, 1) == 1;
4261 }
4262 
4263 #if 0
4264 static bool
4265 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4266 {
4267 	int ret;
4268 
4269 	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4270 					     DP_SINK_COUNT_ESI,
4271 					     sink_irq_vector, 14);
4272 	if (ret != 14)
4273 		return false;
4274 
4275 	return true;
4276 }
4277 #endif
4278 
4279 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4280 {
4281 	uint8_t test_result = DP_TEST_ACK;
4282 	return test_result;
4283 }
4284 
4285 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4286 {
4287 	uint8_t test_result = DP_TEST_NAK;
4288 	return test_result;
4289 }
4290 
4291 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4292 {
4293 	uint8_t test_result = DP_TEST_NAK;
4294 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4295 	struct drm_connector *connector = &intel_connector->base;
4296 
4297 	if (intel_connector->detect_edid == NULL ||
4298 	    connector->edid_corrupt ||
4299 	    intel_dp->aux.i2c_defer_count > 6) {
4300 		/* Check EDID read for NACKs, DEFERs and corruption
4301 		 * (DP CTS 1.2 Core r1.1)
4302 		 *    4.2.2.4 : Failed EDID read, I2C_NAK
4303 		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
4304 		 *    4.2.2.6 : EDID corruption detected
4305 		 * Use failsafe mode for all cases
4306 		 */
4307 		if (intel_dp->aux.i2c_nack_count > 0 ||
4308 			intel_dp->aux.i2c_defer_count > 0)
4309 			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4310 				      intel_dp->aux.i2c_nack_count,
4311 				      intel_dp->aux.i2c_defer_count);
4312 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4313 	} else {
4314 		struct edid *block = intel_connector->detect_edid;
4315 
4316 		/* We have to write the checksum
4317 		 * of the last block read
4318 		 */
4319 		block += intel_connector->detect_edid->extensions;
4320 
4321 		if (!drm_dp_dpcd_write(&intel_dp->aux,
4322 					DP_TEST_EDID_CHECKSUM,
4323 					&block->checksum,
4324 					1))
4325 			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4326 
4327 		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4328 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4329 	}
4330 
4331 	/* Set test active flag here so userspace doesn't interrupt things */
4332 	intel_dp->compliance_test_active = 1;
4333 
4334 	return test_result;
4335 }
4336 
4337 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4338 {
4339 	uint8_t test_result = DP_TEST_NAK;
4340 	return test_result;
4341 }
4342 
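/*
 * DP compliance handling: read the request from DP_TEST_REQUEST,
 * dispatch to the matching autotest handler and write its ACK/NAK back
 * to DP_TEST_RESPONSE. Only the EDID test does real work here; the
 * link-training test is unconditionally ACKed and the others are NAKed.
 */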
4343 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4344 {
4345 	uint8_t response = DP_TEST_NAK;
4346 	uint8_t rxdata = 0;
4347 	int status = 0;
4348 
4349 	intel_dp->compliance_test_active = 0;
4350 	intel_dp->compliance_test_type = 0;
4351 	intel_dp->compliance_test_data = 0;
4352 
4353 	intel_dp->aux.i2c_nack_count = 0;
4354 	intel_dp->aux.i2c_defer_count = 0;
4355 
4356 	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4357 	if (status <= 0) {
4358 		DRM_DEBUG_KMS("Could not read test request from sink\n");
4359 		goto update_status;
4360 	}
4361 
4362 	switch (rxdata) {
4363 	case DP_TEST_LINK_TRAINING:
4364 		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4365 		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4366 		response = intel_dp_autotest_link_training(intel_dp);
4367 		break;
4368 	case DP_TEST_LINK_VIDEO_PATTERN:
4369 		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4370 		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4371 		response = intel_dp_autotest_video_pattern(intel_dp);
4372 		break;
4373 	case DP_TEST_LINK_EDID_READ:
4374 		DRM_DEBUG_KMS("EDID test requested\n");
4375 		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4376 		response = intel_dp_autotest_edid(intel_dp);
4377 		break;
4378 	case DP_TEST_LINK_PHY_TEST_PATTERN:
4379 		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4380 		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4381 		response = intel_dp_autotest_phy_pattern(intel_dp);
4382 		break;
4383 	default:
4384 		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4385 		break;
4386 	}
4387 
4388 update_status:
4389 	status = drm_dp_dpcd_write(&intel_dp->aux,
4390 				   DP_TEST_RESPONSE,
4391 				   &response, 1);
4392 	if (status <= 0)
4393 		DRM_DEBUG_KMS("Could not write test response to sink\n");
4394 }
4395 
4396 #if 0
4397 static int
4398 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4399 {
4400 	bool bret;
4401 
4402 	if (intel_dp->is_mst) {
4403 		u8 esi[16] = { 0 };
4404 		int ret = 0;
4405 		int retry;
4406 		bool handled;
4407 		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4408 go_again:
4409 		if (bret == true) {
4410 
4411 			/* check link status - esi[10] = 0x200c */
4412 			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4413 				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4414 				intel_dp_start_link_train(intel_dp);
4415 				intel_dp_complete_link_train(intel_dp);
4416 				intel_dp_stop_link_train(intel_dp);
4417 			}
4418 
4419 			DRM_DEBUG_KMS("got esi %3ph\n", esi);
4420 			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4421 
4422 			if (handled) {
4423 				for (retry = 0; retry < 3; retry++) {
4424 					int wret;
4425 					wret = drm_dp_dpcd_write(&intel_dp->aux,
4426 								 DP_SINK_COUNT_ESI+1,
4427 								 &esi[1], 3);
4428 					if (wret == 3) {
4429 						break;
4430 					}
4431 				}
4432 
4433 				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4434 				if (bret == true) {
4435 					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4436 					goto go_again;
4437 				}
4438 			} else
4439 				ret = 0;
4440 
4441 			return ret;
4442 		} else {
4443 			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4444 			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4445 			intel_dp->is_mst = false;
4446 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4447 			/* send a hotplug event */
4448 			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4449 		}
4450 	}
4451 	return -EINVAL;
4452 }
4453 #endif
4454 
4455 /*
4456  * According to DP spec
4457  * 5.1.2:
4458  *  1. Read DPCD
4459  *  2. Configure link according to Receiver Capabilities
4460  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4461  *  4. Check link status on receipt of hot-plug interrupt
4462  */
4463 static void
4464 intel_dp_check_link_status(struct intel_dp *intel_dp)
4465 {
4466 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4467 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4468 	u8 sink_irq_vector;
4469 	u8 link_status[DP_LINK_STATUS_SIZE];
4470 
4471 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4472 
4473 	if (!intel_encoder->base.crtc)
4474 		return;
4475 
4476 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4477 		return;
4478 
4479 	/* Try to read receiver status if the link appears to be up */
4480 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
4481 		return;
4482 	}
4483 
4484 	/* Now read the DPCD to see if it's actually running */
4485 	if (!intel_dp_get_dpcd(intel_dp)) {
4486 		return;
4487 	}
4488 
4489 	/* Try to read the source of the interrupt */
4490 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4491 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4492 		/* Clear interrupt source */
4493 		drm_dp_dpcd_writeb(&intel_dp->aux,
4494 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4495 				   sink_irq_vector);
4496 
4497 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4498 			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4499 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4500 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4501 	}
4502 
4503 	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4504 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4505 			      intel_encoder->base.name);
4506 		intel_dp_start_link_train(intel_dp);
4507 		intel_dp_complete_link_train(intel_dp);
4508 		intel_dp_stop_link_train(intel_dp);
4509 	}
4510 }
4511 
4512 /* XXX this is probably wrong for multiple downstream ports */
4513 static enum drm_connector_status
4514 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4515 {
4516 	uint8_t *dpcd = intel_dp->dpcd;
4517 	uint8_t type;
4518 
4519 	if (!intel_dp_get_dpcd(intel_dp))
4520 		return connector_status_disconnected;
4521 
4522 	/* if there's no downstream port, we're done */
4523 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4524 		return connector_status_connected;
4525 
4526 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4527 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4528 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4529 		uint8_t reg;
4530 
4531 		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4532 					    &reg, 1) < 0)
4533 			return connector_status_unknown;
4534 
4535 		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4536 					      : connector_status_disconnected;
4537 	}
4538 
4539 	/* If no HPD, poke DDC gently */
4540 	if (drm_probe_ddc(intel_dp->aux.ddc))
4541 		return connector_status_connected;
4542 
4543 	/* Well we tried, say unknown for unreliable port types */
4544 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4545 		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4546 		if (type == DP_DS_PORT_TYPE_VGA ||
4547 		    type == DP_DS_PORT_TYPE_NON_EDID)
4548 			return connector_status_unknown;
4549 	} else {
4550 		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4551 			DP_DWN_STRM_PORT_TYPE_MASK;
4552 		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4553 		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
4554 			return connector_status_unknown;
4555 	}
4556 
4557 	/* Anything else is out of spec, warn and ignore */
4558 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4559 	return connector_status_disconnected;
4560 }
4561 
4562 static enum drm_connector_status
4563 edp_detect(struct intel_dp *intel_dp)
4564 {
4565 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4566 	enum drm_connector_status status;
4567 
4568 	status = intel_panel_detect(dev);
4569 	if (status == connector_status_unknown)
4570 		status = connector_status_connected;
4571 
4572 	return status;
4573 }
4574 
4575 static enum drm_connector_status
4576 ironlake_dp_detect(struct intel_dp *intel_dp)
4577 {
4578 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4579 	struct drm_i915_private *dev_priv = dev->dev_private;
4580 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4581 
4582 	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4583 		return connector_status_disconnected;
4584 
4585 	return intel_dp_detect_dpcd(intel_dp);
4586 }
4587 
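/*
 * Sample the live hot-plug status bit for the port. Returns 1 if a sink
 * is present, 0 if not, and -EINVAL for ports without a status bit.
 */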
4588 static int g4x_digital_port_connected(struct drm_device *dev,
4589 				       struct intel_digital_port *intel_dig_port)
4590 {
4591 	struct drm_i915_private *dev_priv = dev->dev_private;
4592 	uint32_t bit;
4593 
4594 	if (IS_VALLEYVIEW(dev)) {
4595 		switch (intel_dig_port->port) {
4596 		case PORT_B:
4597 			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4598 			break;
4599 		case PORT_C:
4600 			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4601 			break;
4602 		case PORT_D:
4603 			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4604 			break;
4605 		default:
4606 			return -EINVAL;
4607 		}
4608 	} else {
4609 		switch (intel_dig_port->port) {
4610 		case PORT_B:
4611 			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4612 			break;
4613 		case PORT_C:
4614 			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4615 			break;
4616 		case PORT_D:
4617 			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4618 			break;
4619 		default:
4620 			return -EINVAL;
4621 		}
4622 	}
4623 
4624 	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4625 		return 0;
4626 	return 1;
4627 }
4628 
4629 static enum drm_connector_status
4630 g4x_dp_detect(struct intel_dp *intel_dp)
4631 {
4632 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4633 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4634 	int ret;
4635 
4636 	/* Can't disconnect eDP, but you can close the lid... */
4637 	if (is_edp(intel_dp)) {
4638 		enum drm_connector_status status;
4639 
4640 		status = intel_panel_detect(dev);
4641 		if (status == connector_status_unknown)
4642 			status = connector_status_connected;
4643 		return status;
4644 	}
4645 
4646 	ret = g4x_digital_port_connected(dev, intel_dig_port);
4647 	if (ret == -EINVAL)
4648 		return connector_status_unknown;
4649 	else if (ret == 0)
4650 		return connector_status_disconnected;
4651 
4652 	return intel_dp_detect_dpcd(intel_dp);
4653 }
4654 
4655 static struct edid *
4656 intel_dp_get_edid(struct intel_dp *intel_dp)
4657 {
4658 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4659 
4660 	/* use cached edid if we have one */
4661 	if (intel_connector->edid) {
4662 		/* invalid edid */
4663 		if (IS_ERR(intel_connector->edid))
4664 			return NULL;
4665 
4666 		return drm_edid_duplicate(intel_connector->edid);
4667 	} else
4668 		return drm_get_edid(&intel_connector->base,
4669 				    intel_dp->aux.ddc);
4670 }
4671 
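/*
 * Cache the EDID for this detection cycle and derive has_audio from it,
 * unless the force_audio property overrides what the EDID says.
 */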
4672 static void
4673 intel_dp_set_edid(struct intel_dp *intel_dp)
4674 {
4675 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4676 	struct edid *edid;
4677 
4678 	edid = intel_dp_get_edid(intel_dp);
4679 	intel_connector->detect_edid = edid;
4680 
4681 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4682 		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4683 	else
4684 		intel_dp->has_audio = drm_detect_monitor_audio(edid);
4685 }
4686 
4687 static void
4688 intel_dp_unset_edid(struct intel_dp *intel_dp)
4689 {
4690 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4691 
4692 	kfree(intel_connector->detect_edid);
4693 	intel_connector->detect_edid = NULL;
4694 
4695 	intel_dp->has_audio = false;
4696 }
4697 
4698 static enum intel_display_power_domain
4699 intel_dp_power_get(struct intel_dp *dp)
4700 {
4701 	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4702 	enum intel_display_power_domain power_domain;
4703 
4704 	power_domain = intel_display_port_power_domain(encoder);
4705 	intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4706 
4707 	return power_domain;
4708 }
4709 
4710 static void
4711 intel_dp_power_put(struct intel_dp *dp,
4712 		   enum intel_display_power_domain power_domain)
4713 {
4714 	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4715 	intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4716 }
4717 
4718 static enum drm_connector_status
4719 intel_dp_detect(struct drm_connector *connector, bool force)
4720 {
4721 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4722 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4723 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4724 	struct drm_device *dev = connector->dev;
4725 	enum drm_connector_status status;
4726 	enum intel_display_power_domain power_domain;
4727 	bool ret;
4728 	u8 sink_irq_vector;
4729 
4730 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4731 		      connector->base.id, connector->name);
4732 	intel_dp_unset_edid(intel_dp);
4733 
4734 	if (intel_dp->is_mst) {
4735 		/* MST devices are disconnected from a monitor POV */
4736 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4737 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4738 		return connector_status_disconnected;
4739 	}
4740 
4741 	power_domain = intel_dp_power_get(intel_dp);
4742 
4743 	/* Can't disconnect eDP, but you can close the lid... */
4744 	if (is_edp(intel_dp))
4745 		status = edp_detect(intel_dp);
4746 	else if (HAS_PCH_SPLIT(dev))
4747 		status = ironlake_dp_detect(intel_dp);
4748 	else
4749 		status = g4x_dp_detect(intel_dp);
4750 	if (status != connector_status_connected)
4751 		goto out;
4752 
4753 	intel_dp_probe_oui(intel_dp);
4754 
4755 	ret = intel_dp_probe_mst(intel_dp);
4756 	if (ret) {
4757 		/* if we are in MST mode then this connector
4758 		   won't appear connected or have anything with EDID on it */
4759 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4760 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4761 		status = connector_status_disconnected;
4762 		goto out;
4763 	}
4764 
4765 	intel_dp_set_edid(intel_dp);
4766 
4767 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4768 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4769 	status = connector_status_connected;
4770 
4771 	/* Try to read the source of the interrupt */
4772 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4773 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4774 		/* Clear interrupt source */
4775 		drm_dp_dpcd_writeb(&intel_dp->aux,
4776 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4777 				   sink_irq_vector);
4778 
4779 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4780 			intel_dp_handle_test_request(intel_dp);
4781 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4782 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4783 	}
4784 
4785 out:
4786 	intel_dp_power_put(intel_dp, power_domain);
4787 	return status;
4788 }
4789 
4790 static void
4791 intel_dp_force(struct drm_connector *connector)
4792 {
4793 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4794 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4795 	enum intel_display_power_domain power_domain;
4796 
4797 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4798 		      connector->base.id, connector->name);
4799 	intel_dp_unset_edid(intel_dp);
4800 
4801 	if (connector->status != connector_status_connected)
4802 		return;
4803 
4804 	power_domain = intel_dp_power_get(intel_dp);
4805 
4806 	intel_dp_set_edid(intel_dp);
4807 
4808 	intel_dp_power_put(intel_dp, power_domain);
4809 
4810 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4811 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4812 }
4813 
4814 static int intel_dp_get_modes(struct drm_connector *connector)
4815 {
4816 	struct intel_connector *intel_connector = to_intel_connector(connector);
4817 	struct edid *edid;
4818 
4819 	edid = intel_connector->detect_edid;
4820 	if (edid) {
4821 		int ret = intel_connector_update_modes(connector, edid);
4822 		if (ret)
4823 			return ret;
4824 	}
4825 
4826 	/* if eDP has no EDID, fall back to fixed mode */
4827 	if (is_edp(intel_attached_dp(connector)) &&
4828 	    intel_connector->panel.fixed_mode) {
4829 		struct drm_display_mode *mode;
4830 
4831 		mode = drm_mode_duplicate(connector->dev,
4832 					  intel_connector->panel.fixed_mode);
4833 		if (mode) {
4834 			drm_mode_probed_add(connector, mode);
4835 			return 1;
4836 		}
4837 	}
4838 
4839 	return 0;
4840 }
4841 
4842 static bool
4843 intel_dp_detect_audio(struct drm_connector *connector)
4844 {
4845 	bool has_audio = false;
4846 	struct edid *edid;
4847 
4848 	edid = to_intel_connector(connector)->detect_edid;
4849 	if (edid)
4850 		has_audio = drm_detect_monitor_audio(edid);
4851 
4852 	return has_audio;
4853 }
4854 
4855 static int
4856 intel_dp_set_property(struct drm_connector *connector,
4857 		      struct drm_property *property,
4858 		      uint64_t val)
4859 {
4860 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4861 	struct intel_connector *intel_connector = to_intel_connector(connector);
4862 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4863 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4864 	int ret;
4865 
4866 	ret = drm_object_property_set_value(&connector->base, property, val);
4867 	if (ret)
4868 		return ret;
4869 
4870 	if (property == dev_priv->force_audio_property) {
4871 		int i = val;
4872 		bool has_audio;
4873 
4874 		if (i == intel_dp->force_audio)
4875 			return 0;
4876 
4877 		intel_dp->force_audio = i;
4878 
4879 		if (i == HDMI_AUDIO_AUTO)
4880 			has_audio = intel_dp_detect_audio(connector);
4881 		else
4882 			has_audio = (i == HDMI_AUDIO_ON);
4883 
4884 		if (has_audio == intel_dp->has_audio)
4885 			return 0;
4886 
4887 		intel_dp->has_audio = has_audio;
4888 		goto done;
4889 	}
4890 
4891 	if (property == dev_priv->broadcast_rgb_property) {
4892 		bool old_auto = intel_dp->color_range_auto;
4893 		uint32_t old_range = intel_dp->color_range;
4894 
4895 		switch (val) {
4896 		case INTEL_BROADCAST_RGB_AUTO:
4897 			intel_dp->color_range_auto = true;
4898 			break;
4899 		case INTEL_BROADCAST_RGB_FULL:
4900 			intel_dp->color_range_auto = false;
4901 			intel_dp->color_range = 0;
4902 			break;
4903 		case INTEL_BROADCAST_RGB_LIMITED:
4904 			intel_dp->color_range_auto = false;
4905 			intel_dp->color_range = DP_COLOR_RANGE_16_235;
4906 			break;
4907 		default:
4908 			return -EINVAL;
4909 		}
4910 
4911 		if (old_auto == intel_dp->color_range_auto &&
4912 		    old_range == intel_dp->color_range)
4913 			return 0;
4914 
4915 		goto done;
4916 	}
4917 
4918 	if (is_edp(intel_dp) &&
4919 	    property == connector->dev->mode_config.scaling_mode_property) {
4920 		if (val == DRM_MODE_SCALE_NONE) {
4921 			DRM_DEBUG_KMS("no scaling not supported\n");
4922 			return -EINVAL;
4923 		}
4924 
4925 		if (intel_connector->panel.fitting_mode == val) {
4926 			/* the eDP scaling property is not changed */
4927 			return 0;
4928 		}
4929 		intel_connector->panel.fitting_mode = val;
4930 
4931 		goto done;
4932 	}
4933 
4934 	return -EINVAL;
4935 
4936 done:
4937 	if (intel_encoder->base.crtc)
4938 		intel_crtc_restore_mode(intel_encoder->base.crtc);
4939 
4940 	return 0;
4941 }
4942 
4943 static void
4944 intel_dp_connector_destroy(struct drm_connector *connector)
4945 {
4946 	struct intel_connector *intel_connector = to_intel_connector(connector);
4947 
4948 	kfree(intel_connector->detect_edid);
4949 
4950 	if (!IS_ERR_OR_NULL(intel_connector->edid))
4951 		kfree(intel_connector->edid);
4952 
4953 	/* Can't call is_edp() since the encoder may have been destroyed
4954 	 * already. */
4955 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4956 		intel_panel_fini(&intel_connector->panel);
4957 
4958 	drm_connector_cleanup(connector);
4959 	kfree(connector);
4960 }
4961 
4962 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4963 {
4964 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4965 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4966 
4967 	drm_dp_aux_unregister(&intel_dp->aux);
4968 	intel_dp_mst_encoder_cleanup(intel_dig_port);
4969 	if (is_edp(intel_dp)) {
4970 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4971 		/*
4972 		 * vdd might still be enabled due to the delayed vdd off.
4973 		 * Make sure vdd is actually turned off here.
4974 		 */
4975 		pps_lock(intel_dp);
4976 		edp_panel_vdd_off_sync(intel_dp);
4977 		pps_unlock(intel_dp);
4978 
4979 #if 0
4980 		if (intel_dp->edp_notifier.notifier_call) {
4981 			unregister_reboot_notifier(&intel_dp->edp_notifier);
4982 			intel_dp->edp_notifier.notifier_call = NULL;
4983 		}
4984 #endif
4985 	}
4986 	drm_encoder_cleanup(encoder);
4987 	kfree(intel_dig_port);
4988 }
4989 
4990 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4991 {
4992 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4993 
4994 	if (!is_edp(intel_dp))
4995 		return;
4996 
4997 	/*
4998 	 * vdd might still be enabled due to the delayed vdd off.
4999 	 * Make sure vdd is actually turned off here.
5000 	 */
5001 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5002 	pps_lock(intel_dp);
5003 	edp_panel_vdd_off_sync(intel_dp);
5004 	pps_unlock(intel_dp);
5005 }
5006 
5007 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5008 {
5009 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5010 	struct drm_device *dev = intel_dig_port->base.base.dev;
5011 	struct drm_i915_private *dev_priv = dev->dev_private;
5012 	enum intel_display_power_domain power_domain;
5013 
5014 	lockdep_assert_held(&dev_priv->pps_mutex);
5015 
5016 	if (!edp_have_panel_vdd(intel_dp))
5017 		return;
5018 
5019 	/*
5020 	 * The VDD bit needs a power domain reference, so if the bit is
5021 	 * already enabled when we boot or resume, grab this reference and
5022 	 * schedule a vdd off, so we don't hold on to the reference
5023 	 * indefinitely.
5024 	 */
5025 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5026 	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
5027 	intel_display_power_get(dev_priv, power_domain);
5028 
5029 	edp_panel_vdd_schedule_off(intel_dp);
5030 }
5031 
5032 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5033 {
5034 	struct intel_dp *intel_dp;
5035 
5036 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5037 		return;
5038 
5039 	intel_dp = enc_to_intel_dp(encoder);
5040 
5041 	pps_lock(intel_dp);
5042 
5043 	/*
5044 	 * Read out the current power sequencer assignment,
5045 	 * in case the BIOS did something with it.
5046 	 */
5047 	if (IS_VALLEYVIEW(encoder->dev))
5048 		vlv_initial_power_sequencer_setup(intel_dp);
5049 
5050 	intel_edp_panel_vdd_sanitize(intel_dp);
5051 
5052 	pps_unlock(intel_dp);
5053 }
5054 
5055 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5056 	.dpms = drm_atomic_helper_connector_dpms,
5057 	.detect = intel_dp_detect,
5058 	.force = intel_dp_force,
5059 	.fill_modes = drm_helper_probe_single_connector_modes,
5060 	.set_property = intel_dp_set_property,
5061 	.atomic_get_property = intel_connector_atomic_get_property,
5062 	.destroy = intel_dp_connector_destroy,
5063 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5064 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5065 };
5066 
5067 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5068 	.get_modes = intel_dp_get_modes,
5069 	.mode_valid = intel_dp_mode_valid,
5070 	.best_encoder = intel_best_encoder,
5071 };
5072 
5073 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5074 	.reset = intel_dp_encoder_reset,
5075 	.destroy = intel_dp_encoder_destroy,
5076 };
5077 
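/*
 * Hot-plug IRQ handler. A long pulse re-reads the DPCD and re-probes
 * OUI/MST (but is ignored outright on eDP, where a vdd-off-induced
 * pulse would otherwise cause an endless detect cycle); a short pulse
 * just rechecks the link and retrains if channel EQ has dropped.
 * Returns true when the pulse could not be fully handled here, i.e. on
 * the MST-failure path.
 */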
5078 bool
5079 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5080 {
5081 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5082 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5083 	struct drm_device *dev = intel_dig_port->base.base.dev;
5084 	struct drm_i915_private *dev_priv = dev->dev_private;
5085 	enum intel_display_power_domain power_domain;
5086 	bool ret = true;
5087 
5088 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5089 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5090 
5091 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5092 		/*
5093 		 * vdd off can generate a long pulse on eDP which
5094 		 * would require vdd on to handle it, and thus we
5095 		 * would end up in an endless cycle of
5096 		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5097 		 */
5098 		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5099 			      port_name(intel_dig_port->port));
5100 		return false;
5101 	}
5102 
5103 	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5104 		      port_name(intel_dig_port->port),
5105 		      long_hpd ? "long" : "short");
5106 
5107 	power_domain = intel_display_port_power_domain(intel_encoder);
5108 	intel_display_power_get(dev_priv, power_domain);
5109 
5110 	if (long_hpd) {
5111 		/* indicate that we need to restart link training */
5112 		intel_dp->train_set_valid = false;
5113 
5114 		if (HAS_PCH_SPLIT(dev)) {
5115 			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
5116 				goto mst_fail;
5117 		} else {
5118 			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
5119 				goto mst_fail;
5120 		}
5121 
5122 		if (!intel_dp_get_dpcd(intel_dp)) {
5123 			goto mst_fail;
5124 		}
5125 
5126 		intel_dp_probe_oui(intel_dp);
5127 
5128 		if (!intel_dp_probe_mst(intel_dp)) {
5129 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5130 			intel_dp_check_link_status(intel_dp);
5131 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
5132 			goto mst_fail;
5133 		}
5134 	} else {
5135 		if (intel_dp->is_mst) {
5136 #if 0
5137 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5138 				goto mst_fail;
5139 #endif
5140 		}
5141 
5142 		if (!intel_dp->is_mst) {
5143 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5144 			intel_dp_check_link_status(intel_dp);
5145 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
5146 		}
5147 	}
5148 
5149 	ret = false;
5150 
5151 	goto put_power;
5152 mst_fail:
5153 	/* if we were in MST mode and the device is no longer there, get out of MST mode */
5154 	if (intel_dp->is_mst) {
5155 		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5156 		intel_dp->is_mst = false;
5157 #if 0
5158 		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5159 #endif
5160 	}
5161 put_power:
5162 	intel_display_power_put(dev_priv, power_domain);
5163 
5164 	return ret;
5165 }
5166 
5167 /* Return which DP Port should be selected for Transcoder DP control */
5168 int
5169 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5170 {
5171 	struct drm_device *dev = crtc->dev;
5172 	struct intel_encoder *intel_encoder;
5173 	struct intel_dp *intel_dp;
5174 
5175 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5176 		intel_dp = enc_to_intel_dp(&intel_encoder->base);
5177 
5178 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5179 		    intel_encoder->type == INTEL_OUTPUT_EDP)
5180 			return intel_dp->output_reg;
5181 	}
5182 
5183 	return -1;
5184 }
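
/*
 * Caller sketch: the ironlake PCH enable path matches the returned DP
 * output register against the port B/C/D registers when programming
 * the TRANS_DP_CTL port select (names as in intel_display.c):
 *
 *	switch (intel_trans_dp_port_sel(crtc)) {
 *	case PCH_DP_B: temp |= TRANS_DP_PORT_SEL_B; break;
 *	case PCH_DP_C: temp |= TRANS_DP_PORT_SEL_C; break;
 *	case PCH_DP_D: temp |= TRANS_DP_PORT_SEL_D; break;
 *	}
 *
 * A return of -1 means no DP/eDP encoder drives this crtc.
 */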
5185 
5186 /* check the VBT to see whether the eDP is on another port */
5187 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5188 {
5189 	struct drm_i915_private *dev_priv = dev->dev_private;
5190 	union child_device_config *p_child;
5191 	int i;
5192 	static const short port_mapping[] = {
5193 		[PORT_B] = DVO_PORT_DPB,
5194 		[PORT_C] = DVO_PORT_DPC,
5195 		[PORT_D] = DVO_PORT_DPD,
5196 		[PORT_E] = DVO_PORT_DPE,
5197 	};
5198 
5199 	if (port == PORT_A)
5200 		return true;
5201 
5202 	if (!dev_priv->vbt.child_dev_num)
5203 		return false;
5204 
5205 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5206 		p_child = dev_priv->vbt.child_dev + i;
5207 
5208 		if (p_child->common.dvo_port == port_mapping[port] &&
5209 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5210 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5211 			return true;
5212 	}
5213 	return false;
5214 }
5215 
5216 void
5217 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5218 {
5219 	struct intel_connector *intel_connector = to_intel_connector(connector);
5220 
5221 	intel_attach_force_audio_property(connector);
5222 	intel_attach_broadcast_rgb_property(connector);
5223 	intel_dp->color_range_auto = true;
5224 
5225 	if (is_edp(intel_dp)) {
5226 		drm_mode_create_scaling_mode_property(connector->dev);
5227 		drm_object_attach_property(
5228 			&connector->base,
5229 			connector->dev->mode_config.scaling_mode_property,
5230 			DRM_MODE_SCALE_ASPECT);
5231 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5232 	}
5233 }
5234 
5235 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5236 {
5237 	intel_dp->last_power_cycle = jiffies;
5238 	intel_dp->last_power_on = jiffies;
5239 	intel_dp->last_backlight_off = jiffies;
5240 }
5241 
5242 static void
5243 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5244 				    struct intel_dp *intel_dp)
5245 {
5246 	struct drm_i915_private *dev_priv = dev->dev_private;
5247 	struct edp_power_seq cur, vbt, spec,
5248 		*final = &intel_dp->pps_delays;
5249 	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5250 	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5251 
5252 	lockdep_assert_held(&dev_priv->pps_mutex);
5253 
5254 	/* already initialized? */
5255 	if (final->t11_t12 != 0)
5256 		return;
5257 
5258 	if (IS_BROXTON(dev)) {
5259 		/*
5260 		 * TODO: BXT has 2 sets of PPS registers.
5261 		 * The correct register set for Broxton needs to be
5262 		 * identified using the VBT. Hardcoding set 0 for now.
5263 		 */
5264 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5265 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5266 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5267 	} else if (HAS_PCH_SPLIT(dev)) {
5268 		pp_ctrl_reg = PCH_PP_CONTROL;
5269 		pp_on_reg = PCH_PP_ON_DELAYS;
5270 		pp_off_reg = PCH_PP_OFF_DELAYS;
5271 		pp_div_reg = PCH_PP_DIVISOR;
5272 	} else {
5273 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5274 
5275 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5276 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5277 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5278 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5279 	}
5280 
5281 	/* Workaround: Need to write PP_CONTROL with the unlock key as
5282 	 * the very first thing. */
5283 	pp_ctl = ironlake_get_pp_control(intel_dp);
5284 
5285 	pp_on = I915_READ(pp_on_reg);
5286 	pp_off = I915_READ(pp_off_reg);
5287 	if (!IS_BROXTON(dev)) {
5288 		I915_WRITE(pp_ctrl_reg, pp_ctl);
5289 		pp_div = I915_READ(pp_div_reg);
5290 	}
5291 
5292 	/* Pull timing values out of registers */
5293 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5294 		PANEL_POWER_UP_DELAY_SHIFT;
5295 
5296 	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5297 		PANEL_LIGHT_ON_DELAY_SHIFT;
5298 
5299 	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5300 		PANEL_LIGHT_OFF_DELAY_SHIFT;
5301 
5302 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5303 		PANEL_POWER_DOWN_DELAY_SHIFT;
5304 
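	/*
	 * On BXT the power cycle delay lives in the control register, in
	 * one-based 100ms units; convert it to the 100usec units used for
	 * all the other delays here (100ms == 1000 * 100usec).
	 */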
5305 	if (IS_BROXTON(dev)) {
5306 		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5307 			BXT_POWER_CYCLE_DELAY_SHIFT;
5308 		if (tmp > 0)
5309 			cur.t11_t12 = (tmp - 1) * 1000;
5310 		else
5311 			cur.t11_t12 = 0;
5312 	} else {
5313 		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5314 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5315 	}
5316 
5317 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5318 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5319 
5320 	vbt = dev_priv->vbt.edp_pps;
5321 
5322 	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5323 	 * our hw here, which are all in 100usec. */
5324 	spec.t1_t3 = 210 * 10;
5325 	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5326 	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5327 	spec.t10 = 500 * 10;
5328 	/* This one is special and actually in units of 100ms, but zero
5329 	 * based in the hw (so we need to add 100 ms). But the sw vbt
5330 	 * table multiplies it by 1000 to make it in units of 100usec,
5331 	 * too. */
5332 	spec.t11_t12 = (510 + 100) * 10;
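
	/*
	 * Worked example of the unit juggling: spec.t1_t3 = 210 * 10 is
	 * the 210ms spec limit in the hw's 100usec units, and t11_t12 is
	 * (510 + 100) * 10 = 6100, i.e. a 610ms power cycle fallback.
	 */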
5333 
5334 	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5335 		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5336 
5337 	/* Use the max of the register settings and vbt. If both are
5338 	 * unset, fall back to the spec limits. */
5339 #define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
5340 				       spec.field : \
5341 				       max(cur.field, vbt.field))
5342 	assign_final(t1_t3);
5343 	assign_final(t8);
5344 	assign_final(t9);
5345 	assign_final(t10);
5346 	assign_final(t11_t12);
5347 #undef assign_final
5348 
5349 #define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
5350 	intel_dp->panel_power_up_delay = get_delay(t1_t3);
5351 	intel_dp->backlight_on_delay = get_delay(t8);
5352 	intel_dp->backlight_off_delay = get_delay(t9);
5353 	intel_dp->panel_power_down_delay = get_delay(t10);
5354 	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5355 #undef get_delay
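
	/*
	 * Worked example (VBT value made up for illustration): with
	 * cur.t1_t3 == 0 and vbt.t1_t3 == 1300 (130ms in 100usec units),
	 * assign_final() picked max(0, 1300) == 1300; had both been zero
	 * it would have fallen back to spec (2100). get_delay() then
	 * rounds up to ms: panel_power_up_delay == DIV_ROUND_UP(1300, 10)
	 * == 130.
	 */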
5356 
5357 	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5358 		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5359 		      intel_dp->panel_power_cycle_delay);
5360 
5361 	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5362 		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5363 }
5364 
5365 static void
5366 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5367 					      struct intel_dp *intel_dp)
5368 {
5369 	struct drm_i915_private *dev_priv = dev->dev_private;
5370 	u32 pp_on, pp_off, pp_div, port_sel = 0;
5371 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5372 	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5373 	enum port port = dp_to_dig_port(intel_dp)->port;
5374 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
5375 
5376 	lockdep_assert_held(&dev_priv->pps_mutex);
5377 
5378 	if (IS_BROXTON(dev)) {
5379 		/*
5380 		 * TODO: BXT has 2 sets of PPS registers.
5381 		 * The correct register set for Broxton needs to be
5382 		 * identified using the VBT. Hardcoding set 0 for now.
5383 		 */
5384 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5385 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5386 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5387 
5388 	} else if (HAS_PCH_SPLIT(dev)) {
5389 		pp_on_reg = PCH_PP_ON_DELAYS;
5390 		pp_off_reg = PCH_PP_OFF_DELAYS;
5391 		pp_div_reg = PCH_PP_DIVISOR;
5392 	} else {
5393 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5394 
5395 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5396 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5397 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5398 	}
5399 
5400 	/*
5401 	 * And finally store the new values in the power sequencer. The
5402 	 * backlight delays are set to 1 because we do manual waits on them. For
5403 	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5404 	 * we'll end up waiting for the backlight off delay twice: once when we
5405 	 * do the manual sleep, and once when we disable the panel and wait for
5406 	 * the PP_STATUS bit to become zero.
5407 	 */
5408 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5409 		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5410 	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5411 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5412 	/* Compute the divisor for the pp clock, simply match the Bspec
5413 	 * formula. */
5414 	if (IS_BROXTON(dev)) {
5415 		pp_div = I915_READ(pp_ctrl_reg);
5416 		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5417 		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5418 				<< BXT_POWER_CYCLE_DELAY_SHIFT);
5419 	} else {
5420 		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5421 		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5422 				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
5423 	}
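
	/*
	 * Example of the divisor math (clock value illustrative): with a
	 * 125MHz rawclk, div == 125 and the reference divider field is
	 * (100 * 125) / 2 - 1 == 6249; a 500ms power cycle delay
	 * (t11_t12 == 5000 in 100usec units) is then stored as
	 * DIV_ROUND_UP(5000, 1000) == 5, i.e. in 100ms units.
	 */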
5424 
5425 	/* Haswell doesn't have any port selection bits for the panel
5426 	 * power sequencer any more. */
5427 	if (IS_VALLEYVIEW(dev)) {
5428 		port_sel = PANEL_PORT_SELECT_VLV(port);
5429 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5430 		if (port == PORT_A)
5431 			port_sel = PANEL_PORT_SELECT_DPA;
5432 		else
5433 			port_sel = PANEL_PORT_SELECT_DPD;
5434 	}
5435 
5436 	pp_on |= port_sel;
5437 
5438 	I915_WRITE(pp_on_reg, pp_on);
5439 	I915_WRITE(pp_off_reg, pp_off);
5440 	if (IS_BROXTON(dev))
5441 		I915_WRITE(pp_ctrl_reg, pp_div);
5442 	else
5443 		I915_WRITE(pp_div_reg, pp_div);
5444 
5445 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5446 		      I915_READ(pp_on_reg),
5447 		      I915_READ(pp_off_reg),
5448 		      IS_BROXTON(dev) ?
5449 		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5450 		      I915_READ(pp_div_reg));
5451 }
5452 
5453 /**
5454  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5455  * @dev: DRM device
5456  * @refresh_rate: RR to be programmed
5457  *
5458  * This function gets called when refresh rate (RR) has to be changed from
5459  * one frequency to another. Switches can be between high and low RR
5460  * supported by the panel or to any other RR based on media playback (in
5461  * this case, RR value needs to be passed from user space).
5462  *
5463  * The caller of this function needs to take a lock on dev_priv->drrs.mutex.
5464  */
5465 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5466 {
5467 	struct drm_i915_private *dev_priv = dev->dev_private;
5468 	struct intel_encoder *encoder;
5469 	struct intel_digital_port *dig_port = NULL;
5470 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5471 	struct intel_crtc_state *config = NULL;
5472 	struct intel_crtc *intel_crtc = NULL;
5473 	u32 reg, val;
5474 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5475 
5476 	if (refresh_rate <= 0) {
5477 		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5478 		return;
5479 	}
5480 
5481 	if (intel_dp == NULL) {
5482 		DRM_DEBUG_KMS("DRRS not supported.\n");
5483 		return;
5484 	}
5485 
5486 	/*
5487 	 * FIXME: This needs proper synchronization with psr state for some
5488 	 * platforms that cannot have PSR and DRRS enabled at the same time.
5489 	 */
5490 
5491 	dig_port = dp_to_dig_port(intel_dp);
5492 	encoder = &dig_port->base;
5493 	intel_crtc = to_intel_crtc(encoder->base.crtc);
5494 
5495 	if (!intel_crtc) {
5496 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5497 		return;
5498 	}
5499 
5500 	config = intel_crtc->config;
5501 
5502 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5503 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5504 		return;
5505 	}
5506 
5507 	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5508 			refresh_rate)
5509 		index = DRRS_LOW_RR;
5510 
5511 	if (index == dev_priv->drrs.refresh_rate_type) {
5512 		DRM_DEBUG_KMS(
5513 			"DRRS requested for previously set RR...ignoring\n");
5514 		return;
5515 	}
5516 
5517 	if (!intel_crtc->active) {
5518 		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5519 		return;
5520 	}
5521 
5522 	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5523 		switch (index) {
5524 		case DRRS_HIGH_RR:
5525 			intel_dp_set_m_n(intel_crtc, M1_N1);
5526 			break;
5527 		case DRRS_LOW_RR:
5528 			intel_dp_set_m_n(intel_crtc, M2_N2);
5529 			break;
5530 		case DRRS_MAX_RR:
5531 		default:
5532 			DRM_ERROR("Unsupported refresh rate type\n");
5533 		}
5534 	} else if (INTEL_INFO(dev)->gen > 6) {
5535 		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5536 		val = I915_READ(reg);
5537 
5538 		if (index > DRRS_HIGH_RR) {
5539 			if (IS_VALLEYVIEW(dev))
5540 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5541 			else
5542 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5543 		} else {
5544 			if (IS_VALLEYVIEW(dev))
5545 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5546 			else
5547 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5548 		}
5549 		I915_WRITE(reg, val);
5550 	}
5551 
5552 	dev_priv->drrs.refresh_rate_type = index;
5553 
5554 	DRM_DEBUG_KMS("eDP refresh rate set to %dHz\n", refresh_rate);
5555 }
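
#if 0
/*
 * Usage sketch (hypothetical helper, not wired up anywhere): a media
 * playback path requesting a specific panel rate would look like this;
 * the drrs mutex must be held around the call, per the kernel-doc above.
 */
static void intel_dp_drrs_set_media_rate(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->drrs.mutex);
	intel_dp_set_drrs_state(dev, refresh_rate);
	mutex_unlock(&dev_priv->drrs.mutex);
}
#endif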
5556 
5557 /**
5558  * intel_edp_drrs_enable - init drrs struct if supported
5559  * @intel_dp: DP struct
5560  *
5561  * Initializes frontbuffer_bits and drrs.dp
5562  */
5563 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5564 {
5565 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5566 	struct drm_i915_private *dev_priv = dev->dev_private;
5567 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5568 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5569 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5570 
5571 	if (!intel_crtc->config->has_drrs) {
5572 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5573 		return;
5574 	}
5575 
5576 	mutex_lock(&dev_priv->drrs.mutex);
5577 	if (WARN_ON(dev_priv->drrs.dp)) {
5578 		DRM_ERROR("DRRS already enabled\n");
5579 		goto unlock;
5580 	}
5581 
5582 	dev_priv->drrs.busy_frontbuffer_bits = 0;
5583 
5584 	dev_priv->drrs.dp = intel_dp;
5585 
5586 unlock:
5587 	mutex_unlock(&dev_priv->drrs.mutex);
5588 }
5589 
5590 /**
5591  * intel_edp_drrs_disable - Disable DRRS
5592  * @intel_dp: DP struct
5593  *
5594  */
5595 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5596 {
5597 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5598 	struct drm_i915_private *dev_priv = dev->dev_private;
5599 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5600 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5601 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5602 
5603 	if (!intel_crtc->config->has_drrs)
5604 		return;
5605 
5606 	mutex_lock(&dev_priv->drrs.mutex);
5607 	if (!dev_priv->drrs.dp) {
5608 		mutex_unlock(&dev_priv->drrs.mutex);
5609 		return;
5610 	}
5611 
5612 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5613 		intel_dp_set_drrs_state(dev_priv->dev,
5614 			intel_dp->attached_connector->panel.
5615 			fixed_mode->vrefresh);
5616 
5617 	dev_priv->drrs.dp = NULL;
5618 	mutex_unlock(&dev_priv->drrs.mutex);
5619 
5620 	cancel_delayed_work_sync(&dev_priv->drrs.work);
5621 }
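
/*
 * Note that intel_edp_drrs_enable()/intel_edp_drrs_disable() only arm
 * and disarm the bookkeeping (drrs.dp, busy_frontbuffer_bits); the
 * actual downclocking is done by the idleness work below, scheduled
 * from intel_edp_drrs_flush().
 */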
5622 
5623 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5624 {
5625 	struct drm_i915_private *dev_priv =
5626 		container_of(work, typeof(*dev_priv), drrs.work.work);
5627 	struct intel_dp *intel_dp;
5628 
5629 	mutex_lock(&dev_priv->drrs.mutex);
5630 
5631 	intel_dp = dev_priv->drrs.dp;
5632 
5633 	if (!intel_dp)
5634 		goto unlock;
5635 
5636 	/*
5637 	 * The delayed work can race with an invalidate hence we need to
5638 	 * recheck.
5639 	 */
5640 
5641 	if (dev_priv->drrs.busy_frontbuffer_bits)
5642 		goto unlock;
5643 
5644 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5645 		intel_dp_set_drrs_state(dev_priv->dev,
5646 			intel_dp->attached_connector->panel.
5647 			downclock_mode->vrefresh);
5648 
5649 unlock:
5650 	mutex_unlock(&dev_priv->drrs.mutex);
5651 }
5652 
5653 /**
5654  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5655  * @dev: DRM device
5656  * @frontbuffer_bits: frontbuffer plane tracking bits
5657  *
5658  * This function gets called every time rendering on the given planes starts.
5659  * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
5660  *
5661  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5662  */
5663 void intel_edp_drrs_invalidate(struct drm_device *dev,
5664 		unsigned frontbuffer_bits)
5665 {
5666 	struct drm_i915_private *dev_priv = dev->dev_private;
5667 	struct drm_crtc *crtc;
5668 	enum i915_pipe pipe;
5669 
5670 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5671 		return;
5672 
5673 	cancel_delayed_work(&dev_priv->drrs.work);
5674 
5675 	mutex_lock(&dev_priv->drrs.mutex);
5676 	if (!dev_priv->drrs.dp) {
5677 		mutex_unlock(&dev_priv->drrs.mutex);
5678 		return;
5679 	}
5680 
5681 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5682 	pipe = to_intel_crtc(crtc)->pipe;
5683 
5684 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5685 	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5686 
5687 	/* invalidate means busy screen hence upclock */
5688 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5689 		intel_dp_set_drrs_state(dev_priv->dev,
5690 				dev_priv->drrs.dp->attached_connector->panel.
5691 				fixed_mode->vrefresh);
5692 
5693 	mutex_unlock(&dev_priv->drrs.mutex);
5694 }
5695 
5696 /**
5697  * intel_edp_drrs_flush - Restart Idleness DRRS
5698  * @dev: DRM device
5699  * @frontbuffer_bits: frontbuffer plane tracking bits
5700  *
5701  * This function gets called every time rendering on the given planes has
5702  * completed or flip on a crtc is completed. So DRRS should be upclocked
5703  * completed or a flip on a crtc is completed. So DRRS should be upclocked
5704  * (LOW_RR -> HIGH_RR). Also, idleness detection should be started again
5705  *
5706  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5707  */
5708 void intel_edp_drrs_flush(struct drm_device *dev,
5709 		unsigned frontbuffer_bits)
5710 {
5711 	struct drm_i915_private *dev_priv = dev->dev_private;
5712 	struct drm_crtc *crtc;
5713 	enum i915_pipe pipe;
5714 
5715 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5716 		return;
5717 
5718 	cancel_delayed_work(&dev_priv->drrs.work);
5719 
5720 	mutex_lock(&dev_priv->drrs.mutex);
5721 	if (!dev_priv->drrs.dp) {
5722 		mutex_unlock(&dev_priv->drrs.mutex);
5723 		return;
5724 	}
5725 
5726 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5727 	pipe = to_intel_crtc(crtc)->pipe;
5728 
5729 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5730 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5731 
5732 	/* flush means busy screen hence upclock */
5733 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5734 		intel_dp_set_drrs_state(dev_priv->dev,
5735 				dev_priv->drrs.dp->attached_connector->panel.
5736 				fixed_mode->vrefresh);
5737 
5738 	/*
5739 	 * flush also means no more activity hence schedule downclock, if all
5740 	 * other fbs are quiescent too
5741 	 */
5742 	if (!dev_priv->drrs.busy_frontbuffer_bits)
5743 		schedule_delayed_work(&dev_priv->drrs.work,
5744 				msecs_to_jiffies(1000));
5745 	mutex_unlock(&dev_priv->drrs.mutex);
5746 }
5747 
5748 /**
5749  * DOC: Display Refresh Rate Switching (DRRS)
5750  *
5751  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5752  * which enables swtching between low and high refresh rates,
5753  * which enables switching between low and high refresh rates,
5754  * for internal panels.
5755  *
5756  * Indication that the panel supports DRRS is given by the panel EDID, which
5757  * would list multiple refresh rates for one resolution.
5758  *
5759  * DRRS is of 2 types - static and seamless.
5760  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5761  * (may appear as a blink on screen) and is used in dock-undock scenario.
5762  * Seamless DRRS involves changing RR without any visual effect to the user
5763  * and can be used during normal system usage. This is done by programming
5764  * certain registers.
5765  *
5766  * Support for static/seamless DRRS may be indicated in the VBT based on
5767  * inputs from the panel spec.
5768  *
5769  * DRRS saves power by switching to low RR based on usage scenarios.
5770  *
5771  * eDP DRRS:
5772  *        The implementation is based on frontbuffer tracking implementation.
5773  * When there is a disturbance on the screen triggered by user activity or a
5774  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5775  * When there is no movement on screen, after a timeout of 1 second, a switch
5776  * to low RR is made.
5777  *        For integration with frontbuffer tracking code,
5778  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5779  *
5780  * DRRS can be further extended to support other internal panels and also
5781  * the scenario of video playback wherein RR is set based on the rate
5782  * requested by userspace.
5783  */
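
#if 0
/*
 * Integration sketch (illustrative only, never compiled): a frontbuffer
 * tracking user upclocks around CPU rendering and lets the flush re-arm
 * the 1 second idleness timer. The frontbuffer bits are made up for the
 * example.
 */
static void intel_edp_drrs_frontbuffer_example(struct drm_device *dev)
{
	unsigned frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(PIPE_A);

	intel_edp_drrs_invalidate(dev, frontbuffer_bits);	/* busy -> HIGH_RR */
	/* ... render to the frontbuffer ... */
	intel_edp_drrs_flush(dev, frontbuffer_bits);	/* idle timer re-armed */
}
#endif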
5784 
5785 /**
5786  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5787  * @intel_connector: eDP connector
5788  * @fixed_mode: preferred mode of panel
5789  *
5790  * This function is called only once at driver load to initialize basic
5791  * DRRS stuff.
5792  *
5793  * Returns:
5794  * Downclock mode if panel supports it, else return NULL.
5795  * DRRS support is determined by the presence of downclock mode (apart
5796  * from VBT setting).
5797  */
5798 static struct drm_display_mode *
5799 intel_dp_drrs_init(struct intel_connector *intel_connector,
5800 		struct drm_display_mode *fixed_mode)
5801 {
5802 	struct drm_connector *connector = &intel_connector->base;
5803 	struct drm_device *dev = connector->dev;
5804 	struct drm_i915_private *dev_priv = dev->dev_private;
5805 	struct drm_display_mode *downclock_mode = NULL;
5806 
5807 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5808 	lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5809 
5810 	if (INTEL_INFO(dev)->gen <= 6) {
5811 		DRM_DEBUG_KMS("DRRS only supported for Gen7 and above\n");
5812 		return NULL;
5813 	}
5814 
5815 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5816 		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5817 		return NULL;
5818 	}
5819 
5820 	downclock_mode = intel_find_panel_downclock
5821 					(dev, fixed_mode, connector);
5822 
5823 	if (!downclock_mode) {
5824 		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5825 		return NULL;
5826 	}
5827 
5828 	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5829 
5830 	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5831 	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5832 	return downclock_mode;
5833 }
5834 
5835 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5836 				     struct intel_connector *intel_connector)
5837 {
5838 	struct drm_connector *connector = &intel_connector->base;
5839 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5840 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5841 	struct drm_device *dev = intel_encoder->base.dev;
5842 	struct drm_i915_private *dev_priv = dev->dev_private;
5843 	struct drm_display_mode *fixed_mode = NULL;
5844 	struct drm_display_mode *downclock_mode = NULL;
5845 	bool has_dpcd;
5846 	struct drm_display_mode *scan;
5847 	struct edid *edid;
5848 	enum i915_pipe pipe = INVALID_PIPE;
5849 
5850 	if (!is_edp(intel_dp))
5851 		return true;
5852 
5853 	pps_lock(intel_dp);
5854 	intel_edp_panel_vdd_sanitize(intel_dp);
5855 	pps_unlock(intel_dp);
5856 
5857 	/* Cache DPCD and EDID for edp. */
5858 	has_dpcd = intel_dp_get_dpcd(intel_dp);
5859 
5860 	if (has_dpcd) {
5861 		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5862 			dev_priv->no_aux_handshake =
5863 				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5864 				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5865 	} else {
5866 		/* if this fails, presume the device is a ghost */
5867 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
5868 		return false;
5869 	}
5870 
5871 	/* We now know it's not a ghost, init power sequence regs. */
5872 	pps_lock(intel_dp);
5873 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5874 	pps_unlock(intel_dp);
5875 
5876 	mutex_lock(&dev->mode_config.mutex);
5877 	edid = drm_get_edid(connector, intel_dp->aux.ddc);
5878 	if (edid) {
5879 		if (drm_add_edid_modes(connector, edid)) {
5880 			drm_mode_connector_update_edid_property(connector,
5881 								edid);
5882 			drm_edid_to_eld(connector, edid);
5883 		} else {
5884 			kfree(edid);
5885 			edid = ERR_PTR(-EINVAL);
5886 		}
5887 	} else {
5888 		edid = ERR_PTR(-ENOENT);
5889 	}
5890 	intel_connector->edid = edid;
5891 
5892 	/* prefer fixed mode from EDID if available */
5893 	list_for_each_entry(scan, &connector->probed_modes, head) {
5894 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5895 			fixed_mode = drm_mode_duplicate(dev, scan);
5896 			downclock_mode = intel_dp_drrs_init(
5897 						intel_connector, fixed_mode);
5898 			break;
5899 		}
5900 	}
5901 
5902 	/* fallback to VBT if available for eDP */
5903 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5904 		fixed_mode = drm_mode_duplicate(dev,
5905 					dev_priv->vbt.lfp_lvds_vbt_mode);
5906 		if (fixed_mode)
5907 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5908 	}
5909 	mutex_unlock(&dev->mode_config.mutex);
5910 
5911 	if (IS_VALLEYVIEW(dev)) {
5912 #if 0
5913 		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5914 		register_reboot_notifier(&intel_dp->edp_notifier);
5915 #endif
5916 
5917 		/*
5918 		 * Figure out the current pipe for the initial backlight setup.
5919 		 * If the current pipe isn't valid, try the PPS pipe, and if that
5920 		 * fails just assume pipe A.
5921 		 */
5922 		if (IS_CHERRYVIEW(dev))
5923 			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5924 		else
5925 			pipe = PORT_TO_PIPE(intel_dp->DP);
5926 
5927 		if (pipe != PIPE_A && pipe != PIPE_B)
5928 			pipe = intel_dp->pps_pipe;
5929 
5930 		if (pipe != PIPE_A && pipe != PIPE_B)
5931 			pipe = PIPE_A;
5932 
5933 		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5934 			      pipe_name(pipe));
5935 	}
5936 
5937 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5938 	intel_connector->panel.backlight_power = intel_edp_backlight_power;
5939 	intel_panel_setup_backlight(connector, pipe);
5940 
5941 	return true;
5942 }
5943 
5944 bool
5945 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5946 			struct intel_connector *intel_connector)
5947 {
5948 	struct drm_connector *connector = &intel_connector->base;
5949 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5950 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5951 	struct drm_device *dev = intel_encoder->base.dev;
5952 	struct drm_i915_private *dev_priv = dev->dev_private;
5953 	enum port port = intel_dig_port->port;
5954 	int type;
5955 
5956 	intel_dp->pps_pipe = INVALID_PIPE;
5957 
5958 	/* intel_dp vfuncs */
5959 	if (INTEL_INFO(dev)->gen >= 9)
5960 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5961 	else if (IS_VALLEYVIEW(dev))
5962 		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5963 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5964 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5965 	else if (HAS_PCH_SPLIT(dev))
5966 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5967 	else
5968 		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5969 
5970 	if (INTEL_INFO(dev)->gen >= 9)
5971 		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5972 	else
5973 		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5974 
5975 	/* Preserve the current hw state. */
5976 	intel_dp->DP = I915_READ(intel_dp->output_reg);
5977 	intel_dp->attached_connector = intel_connector;
5978 
5979 	if (intel_dp_is_edp(dev, port))
5980 		type = DRM_MODE_CONNECTOR_eDP;
5981 	else
5982 		type = DRM_MODE_CONNECTOR_DisplayPort;
5983 
5984 	/*
5985 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5986 	 * for DP the encoder type can be set by the caller to
5987 	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5988 	 */
5989 	if (type == DRM_MODE_CONNECTOR_eDP)
5990 		intel_encoder->type = INTEL_OUTPUT_EDP;
5991 
5992 	/* eDP only on port B and/or C on vlv/chv */
5993 	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5994 		    port != PORT_B && port != PORT_C))
5995 		return false;
5996 
5997 	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5998 			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5999 			port_name(port));
6000 
6001 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6002 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6003 
6004 	connector->interlace_allowed = true;
6005 	connector->doublescan_allowed = 0;
6006 
6007 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
6008 			  edp_panel_vdd_work);
6009 
6010 	intel_connector_attach_encoder(intel_connector, intel_encoder);
6011 	drm_connector_register(connector);
6012 
6013 	if (HAS_DDI(dev))
6014 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6015 	else
6016 		intel_connector->get_hw_state = intel_connector_get_hw_state;
6017 	intel_connector->unregister = intel_dp_connector_unregister;
6018 
6019 	/* Set up the hotplug pin. */
6020 	switch (port) {
6021 	case PORT_A:
6022 		intel_encoder->hpd_pin = HPD_PORT_A;
6023 		break;
6024 	case PORT_B:
6025 		intel_encoder->hpd_pin = HPD_PORT_B;
6026 		break;
6027 	case PORT_C:
6028 		intel_encoder->hpd_pin = HPD_PORT_C;
6029 		break;
6030 	case PORT_D:
6031 		intel_encoder->hpd_pin = HPD_PORT_D;
6032 		break;
6033 	case PORT_E:
6034 		intel_encoder->hpd_pin = HPD_PORT_E;
6035 		break;
6036 	default:
6037 		BUG();
6038 	}
6039 
6040 	if (is_edp(intel_dp)) {
6041 		pps_lock(intel_dp);
6042 		intel_dp_init_panel_power_timestamps(intel_dp);
6043 		if (IS_VALLEYVIEW(dev))
6044 			vlv_initial_power_sequencer_setup(intel_dp);
6045 		else
6046 			intel_dp_init_panel_power_sequencer(dev, intel_dp);
6047 		pps_unlock(intel_dp);
6048 	}
6049 
6050 	intel_dp_aux_init(intel_dp, intel_connector);
6051 
6052 	/* init MST on ports that can support it */
6053 	if (HAS_DP_MST(dev) &&
6054 	    (port == PORT_B || port == PORT_C || port == PORT_D))
6055 		intel_dp_mst_encoder_init(intel_dig_port,
6056 					  intel_connector->base.base.id);
6057 
6058 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6059 		drm_dp_aux_unregister(&intel_dp->aux);
6060 		if (is_edp(intel_dp)) {
6061 			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6062 			/*
6063 			 * vdd might still be enabled due to the delayed vdd off.
6064 			 * Make sure vdd is actually turned off here.
6065 			 */
6066 			pps_lock(intel_dp);
6067 			edp_panel_vdd_off_sync(intel_dp);
6068 			pps_unlock(intel_dp);
6069 		}
6070 		drm_connector_unregister(connector);
6071 		drm_connector_cleanup(connector);
6072 		return false;
6073 	}
6074 
6075 	intel_dp_add_properties(intel_dp, connector);
6076 
6077 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6078 	 * 0xd.  Failure to do so will result in spurious interrupts being
6079 	 * generated on the port when a cable is not attached.
6080 	 */
6081 	if (IS_G4X(dev) && !IS_GM45(dev)) {
6082 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6083 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6084 	}
6085 
6086 #if 0
6087 	i915_debugfs_connector_add(connector);
6088 #endif
6089 
6090 	return true;
6091 }
6092 
6093 void
6094 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6095 {
6096 	struct drm_i915_private *dev_priv = dev->dev_private;
6097 	struct intel_digital_port *intel_dig_port;
6098 	struct intel_encoder *intel_encoder;
6099 	struct drm_encoder *encoder;
6100 	struct intel_connector *intel_connector;
6101 
6102 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6103 	if (!intel_dig_port)
6104 		return;
6105 
6106 	intel_connector = intel_connector_alloc();
6107 	if (!intel_connector) {
6108 		kfree(intel_dig_port);
6109 		return;
6110 	}
6111 
6112 	intel_encoder = &intel_dig_port->base;
6113 	encoder = &intel_encoder->base;
6114 
6115 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6116 			 DRM_MODE_ENCODER_TMDS);
6117 
6118 	intel_encoder->compute_config = intel_dp_compute_config;
6119 	intel_encoder->disable = intel_disable_dp;
6120 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
6121 	intel_encoder->get_config = intel_dp_get_config;
6122 	intel_encoder->suspend = intel_dp_encoder_suspend;
6123 	if (IS_CHERRYVIEW(dev)) {
6124 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6125 		intel_encoder->pre_enable = chv_pre_enable_dp;
6126 		intel_encoder->enable = vlv_enable_dp;
6127 		intel_encoder->post_disable = chv_post_disable_dp;
6128 	} else if (IS_VALLEYVIEW(dev)) {
6129 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6130 		intel_encoder->pre_enable = vlv_pre_enable_dp;
6131 		intel_encoder->enable = vlv_enable_dp;
6132 		intel_encoder->post_disable = vlv_post_disable_dp;
6133 	} else {
6134 		intel_encoder->pre_enable = g4x_pre_enable_dp;
6135 		intel_encoder->enable = g4x_enable_dp;
6136 		if (INTEL_INFO(dev)->gen >= 5)
6137 			intel_encoder->post_disable = ilk_post_disable_dp;
6138 	}
6139 
6140 	intel_dig_port->port = port;
6141 	intel_dig_port->dp.output_reg = output_reg;
6142 
6143 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6144 	if (IS_CHERRYVIEW(dev)) {
6145 		if (port == PORT_D)
6146 			intel_encoder->crtc_mask = 1 << 2;
6147 		else
6148 			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6149 	} else {
6150 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6151 	}
6152 	intel_encoder->cloneable = 0;
6153 
6154 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6155 	dev_priv->hotplug.irq_port[port] = intel_dig_port;
6156 
6157 	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6158 		drm_encoder_cleanup(encoder);
6159 		kfree(intel_dig_port);
6160 		kfree(intel_connector);
6161 	}
6162 }
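
/*
 * Call-site sketch (cf. intel_setup_outputs() in intel_display.c):
 *
 *	if (I915_READ(DP_B) & DP_DETECTED)
 *		intel_dp_init(dev, DP_B, PORT_B);
 *
 * On allocation failure intel_dp_init() simply returns; connector-level
 * failures are unwound by the cleanup at the end of the function above.
 */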
6163 
6164 #if 0
6165 void intel_dp_mst_suspend(struct drm_device *dev)
6166 {
6167 	struct drm_i915_private *dev_priv = dev->dev_private;
6168 	int i;
6169 
6170 	/* disable MST */
6171 	for (i = 0; i < I915_MAX_PORTS; i++) {
6172 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6173 		if (!intel_dig_port)
6174 			continue;
6175 
6176 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6177 			if (!intel_dig_port->dp.can_mst)
6178 				continue;
6179 			if (intel_dig_port->dp.is_mst)
6180 				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6181 		}
6182 	}
6183 }
6184 #endif
6185 
6186 void intel_dp_mst_resume(struct drm_device *dev)
6187 {
6188 	struct drm_i915_private *dev_priv = dev->dev_private;
6189 	int i;
6190 
6191 	for (i = 0; i < I915_MAX_PORTS; i++) {
6192 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6193 		if (!intel_dig_port)
6194 			continue;
6195 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6196 #if 0
6197 			int ret;
6198 
6199 			if (!intel_dig_port->dp.can_mst)
6200 				continue;
6201 
6202 			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6203 			if (ret != 0) {
6204 				intel_dp_check_mst_status(&intel_dig_port->dp);
6205 			}
6206 #endif
6207 		}
6208 	}
6209 }
6210