xref: /dragonfly/sys/dev/drm/i915/intel_dp.c (revision c66c7e2f)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27 
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <drm/drmP.h>
31 #include <linux/slab.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_crtc.h>
34 #include <drm/drm_crtc_helper.h>
35 #include <drm/drm_edid.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 
40 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
41 
42 static int disable_aux_irq = 0;
43 TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
44 
45 struct dp_link_dpll {
46 	int link_bw;
47 	struct dpll dpll;
48 };
49 
50 static const struct dp_link_dpll gen4_dpll[] = {
51 	{ DP_LINK_BW_1_62,
52 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
53 	{ DP_LINK_BW_2_7,
54 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
55 };
56 
57 static const struct dp_link_dpll pch_dpll[] = {
58 	{ DP_LINK_BW_1_62,
59 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
60 	{ DP_LINK_BW_2_7,
61 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
62 };
63 
64 static const struct dp_link_dpll vlv_dpll[] = {
65 	{ DP_LINK_BW_1_62,
66 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
67 	{ DP_LINK_BW_2_7,
68 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
69 };
70 
71 /*
72  * CHV supports eDP 1.4, which allows additional link rates.
73  * The table below provides only the fixed rates, excluding the variable rates.
74  */
75 static const struct dp_link_dpll chv_dpll[] = {
76 	/*
77 	 * CHV requires programming a fractional divider for m2.
78 	 * m2 is stored in fixed-point format using the formula below:
79 	 * (m2_int << 22) | m2_fraction
80 	 */
81 	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
82 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
83 	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
84 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
85 	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
86 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
87 };
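/*
 * Worked example of the fixed-point encoding above: for DP_LINK_BW_1_62,
 * m2 = 0x819999a == (32 << 22) | 1677722, i.e. m2_int = 32 and
 * m2_fraction = 1677722, matching the per-entry comments.
 */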
88 /* Skylake supports the following rates */
89 static const int gen9_rates[] = { 162000, 216000, 270000,
90 				  324000, 432000, 540000 };
91 static const int chv_rates[] = { 162000, 202500, 210000, 216000,
92 				 243000, 270000, 324000, 405000,
93 				 420000, 432000, 540000 };
94 static const int default_rates[] = { 162000, 270000, 540000 };
95 
96 /**
97  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
98  * @intel_dp: DP struct
99  *
100  * If a CPU or PCH DP output is attached to an eDP panel, this function
101  * will return true, and false otherwise.
102  */
103 static bool is_edp(struct intel_dp *intel_dp)
104 {
105 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
106 
107 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
108 }
109 
110 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
111 {
112 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113 
114 	return intel_dig_port->base.base.dev;
115 }
116 
117 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
118 {
119 	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
120 }
121 
122 static void intel_dp_link_down(struct intel_dp *intel_dp);
123 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
124 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
125 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
126 static void vlv_steal_power_sequencer(struct drm_device *dev,
127 				      enum i915_pipe pipe);
128 
129 static int
130 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
131 {
132 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
133 
134 	switch (max_link_bw) {
135 	case DP_LINK_BW_1_62:
136 	case DP_LINK_BW_2_7:
137 	case DP_LINK_BW_5_4:
138 		break;
139 	default:
140 		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
141 		     max_link_bw);
142 		max_link_bw = DP_LINK_BW_1_62;
143 		break;
144 	}
145 	return max_link_bw;
146 }
147 
148 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
149 {
150 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
151 	struct drm_device *dev = intel_dig_port->base.base.dev;
152 	u8 source_max, sink_max;
153 
154 	source_max = 4;
155 	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
156 	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
157 		source_max = 2;
158 
159 	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
160 
161 	return min(source_max, sink_max);
162 }
163 
164 /*
165  * The units on the numbers in the next two are... bizarre.  Examples will
166  * make it clearer; this one parallels an example in the eDP spec.
167  *
168  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
169  *
170  *     270000 * 1 * 8 / 10 == 216000
171  *
172  * The actual data capacity of that configuration is 2.16Gbit/s, so the
173  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
174  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
175  * 119000.  At 18bpp that's 2142000 kilobits per second.
176  *
177  * Thus the strange-looking division by 10 in intel_dp_link_required, to
178  * get the result in decakilobits instead of kilobits.
179  */
180 
181 static int
182 intel_dp_link_required(int pixel_clock, int bpp)
183 {
184 	return (pixel_clock * bpp + 9) / 10;
185 }
186 
187 static int
188 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
189 {
190 	return (max_link_clock * max_lanes * 8) / 10;
191 }
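/*
 * Tying the two helpers to the example above: 1680x1050R has
 * ->clock == 119000, so at 18bpp intel_dp_link_required(119000, 18)
 * == 214200 decakilobits, which fits within one 2.7GHz lane since
 * intel_dp_max_data_rate(270000, 1) == 216000.
 */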
192 
193 static enum drm_mode_status
194 intel_dp_mode_valid(struct drm_connector *connector,
195 		    struct drm_display_mode *mode)
196 {
197 	struct intel_dp *intel_dp = intel_attached_dp(connector);
198 	struct intel_connector *intel_connector = to_intel_connector(connector);
199 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
200 	int target_clock = mode->clock;
201 	int max_rate, mode_rate, max_lanes, max_link_clock;
202 
203 	if (is_edp(intel_dp) && fixed_mode) {
204 		if (mode->hdisplay > fixed_mode->hdisplay)
205 			return MODE_PANEL;
206 
207 		if (mode->vdisplay > fixed_mode->vdisplay)
208 			return MODE_PANEL;
209 
210 		target_clock = fixed_mode->clock;
211 	}
212 
213 	max_link_clock = intel_dp_max_link_rate(intel_dp);
214 	max_lanes = intel_dp_max_lane_count(intel_dp);
215 
216 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
217 	mode_rate = intel_dp_link_required(target_clock, 18);
218 
219 	if (mode_rate > max_rate)
220 		return MODE_CLOCK_HIGH;
221 
222 	if (mode->clock < 10000)
223 		return MODE_CLOCK_LOW;
224 
225 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
226 		return MODE_H_ILLEGAL;
227 
228 	return MODE_OK;
229 }
230 
231 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
232 {
233 	int	i;
234 	uint32_t v = 0;
235 
236 	if (src_bytes > 4)
237 		src_bytes = 4;
238 	for (i = 0; i < src_bytes; i++)
239 		v |= ((uint32_t) src[i]) << ((3-i) * 8);
240 	return v;
241 }
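/*
 * Example: packing the bytes { 0x12, 0x34, 0x56, 0x78 } yields 0x12345678;
 * bytes are placed MSB-first to match the layout of the AUX channel data
 * registers, and intel_dp_unpack_aux() below performs the inverse.
 */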
242 
243 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
244 {
245 	int i;
246 	if (dst_bytes > 4)
247 		dst_bytes = 4;
248 	for (i = 0; i < dst_bytes; i++)
249 		dst[i] = src >> ((3-i) * 8);
250 }
251 
252 /* hrawclock is 1/4 the FSB frequency */
253 static int
254 intel_hrawclk(struct drm_device *dev)
255 {
256 	struct drm_i915_private *dev_priv = dev->dev_private;
257 	uint32_t clkcfg;
258 
259 	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
260 	if (IS_VALLEYVIEW(dev))
261 		return 200;
262 
263 	clkcfg = I915_READ(CLKCFG);
264 	switch (clkcfg & CLKCFG_FSB_MASK) {
265 	case CLKCFG_FSB_400:
266 		return 100;
267 	case CLKCFG_FSB_533:
268 		return 133;
269 	case CLKCFG_FSB_667:
270 		return 166;
271 	case CLKCFG_FSB_800:
272 		return 200;
273 	case CLKCFG_FSB_1067:
274 		return 266;
275 	case CLKCFG_FSB_1333:
276 		return 333;
277 	/* these two are just a guess; one of them might be right */
278 	case CLKCFG_FSB_1600:
279 	case CLKCFG_FSB_1600_ALT:
280 		return 400;
281 	default:
282 		return 133;
283 	}
284 }
285 
286 static void
287 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
288 				    struct intel_dp *intel_dp);
289 static void
290 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
291 					      struct intel_dp *intel_dp);
292 
293 static void pps_lock(struct intel_dp *intel_dp)
294 {
295 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
296 	struct intel_encoder *encoder = &intel_dig_port->base;
297 	struct drm_device *dev = encoder->base.dev;
298 	struct drm_i915_private *dev_priv = dev->dev_private;
299 	enum intel_display_power_domain power_domain;
300 
301 	/*
302 	 * See vlv_power_sequencer_reset() for why we need
303 	 * a power domain reference here.
304 	 */
305 	power_domain = intel_display_port_power_domain(encoder);
306 	intel_display_power_get(dev_priv, power_domain);
307 
308 	mutex_lock(&dev_priv->pps_mutex);
309 }
310 
311 static void pps_unlock(struct intel_dp *intel_dp)
312 {
313 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
314 	struct intel_encoder *encoder = &intel_dig_port->base;
315 	struct drm_device *dev = encoder->base.dev;
316 	struct drm_i915_private *dev_priv = dev->dev_private;
317 	enum intel_display_power_domain power_domain;
318 
319 	mutex_unlock(&dev_priv->pps_mutex);
320 
321 	power_domain = intel_display_port_power_domain(encoder);
322 	intel_display_power_put(dev_priv, power_domain);
323 }
324 
325 static void
326 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
327 {
328 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
329 	struct drm_device *dev = intel_dig_port->base.base.dev;
330 	struct drm_i915_private *dev_priv = dev->dev_private;
331 	enum i915_pipe pipe = intel_dp->pps_pipe;
332 	bool pll_enabled;
333 	uint32_t DP;
334 
335 	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
336 		 "skipping pipe %c power sequencer kick due to port %c being active\n",
337 		 pipe_name(pipe), port_name(intel_dig_port->port)))
338 		return;
339 
340 	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
341 		      pipe_name(pipe), port_name(intel_dig_port->port));
342 
343 	/* Preserve the BIOS-computed detected bit. This is
344 	 * supposed to be read-only.
345 	 */
346 	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
347 	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
348 	DP |= DP_PORT_WIDTH(1);
349 	DP |= DP_LINK_TRAIN_PAT_1;
350 
351 	if (IS_CHERRYVIEW(dev))
352 		DP |= DP_PIPE_SELECT_CHV(pipe);
353 	else if (pipe == PIPE_B)
354 		DP |= DP_PIPEB_SELECT;
355 
356 	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
357 
358 	/*
359 	 * The DPLL for the pipe must be enabled for this to work.
360 	 * So enable it temporarily if it's not already enabled.
361 	 */
362 	if (!pll_enabled)
363 		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
364 				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
365 
366 	/*
367 	 * Similar magic as in intel_dp_enable_port().
368 	 * We _must_ do this port enable + disable trick
369 	 * to make this power sequencer lock onto the port.
370 	 * Otherwise even the VDD force bit won't work.
371 	 */
372 	I915_WRITE(intel_dp->output_reg, DP);
373 	POSTING_READ(intel_dp->output_reg);
374 
375 	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
376 	POSTING_READ(intel_dp->output_reg);
377 
378 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
379 	POSTING_READ(intel_dp->output_reg);
380 
381 	if (!pll_enabled)
382 		vlv_force_pll_off(dev, pipe);
383 }
384 
385 static enum i915_pipe
386 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
387 {
388 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
389 	struct drm_device *dev = intel_dig_port->base.base.dev;
390 	struct drm_i915_private *dev_priv = dev->dev_private;
391 	struct intel_encoder *encoder;
392 	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
393 	enum i915_pipe pipe;
394 
395 	lockdep_assert_held(&dev_priv->pps_mutex);
396 
397 	/* We should never land here with regular DP ports */
398 	WARN_ON(!is_edp(intel_dp));
399 
400 	if (intel_dp->pps_pipe != INVALID_PIPE)
401 		return intel_dp->pps_pipe;
402 
403 	/*
404 	 * We don't have a power sequencer currently.
405 	 * Pick one that's not used by other ports.
406 	 */
407 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
408 			    base.head) {
409 		struct intel_dp *tmp;
410 
411 		if (encoder->type != INTEL_OUTPUT_EDP)
412 			continue;
413 
414 		tmp = enc_to_intel_dp(&encoder->base);
415 
416 		if (tmp->pps_pipe != INVALID_PIPE)
417 			pipes &= ~(1 << tmp->pps_pipe);
418 	}
419 
420 	/*
421 	 * Didn't find one. This should not happen since there
422 	 * are two power sequencers and up to two eDP ports.
423 	 */
424 	if (WARN_ON(pipes == 0))
425 		pipe = PIPE_A;
426 	else
427 		pipe = ffs(pipes) - 1;
428 
429 	vlv_steal_power_sequencer(dev, pipe);
430 	intel_dp->pps_pipe = pipe;
431 
432 	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
433 		      pipe_name(intel_dp->pps_pipe),
434 		      port_name(intel_dig_port->port));
435 
436 	/* init power sequencer on this pipe and port */
437 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
438 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
439 
440 	/*
441 	 * Even VDD force doesn't work until we've made
442 	 * the power sequencer lock onto the port.
443 	 */
444 	vlv_power_sequencer_kick(intel_dp);
445 
446 	return intel_dp->pps_pipe;
447 }
448 
449 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
450 			       enum i915_pipe pipe);
451 
452 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
453 			       enum i915_pipe pipe)
454 {
455 	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
456 }
457 
458 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
459 				enum i915_pipe pipe)
460 {
461 	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
462 }
463 
464 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
465 			 enum i915_pipe pipe)
466 {
467 	return true;
468 }
469 
470 static enum i915_pipe
471 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
472 		     enum port port,
473 		     vlv_pipe_check pipe_check)
474 {
475 	enum i915_pipe pipe;
476 
477 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
478 		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
479 			PANEL_PORT_SELECT_MASK;
480 
481 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
482 			continue;
483 
484 		if (!pipe_check(dev_priv, pipe))
485 			continue;
486 
487 		return pipe;
488 	}
489 
490 	return INVALID_PIPE;
491 }
492 
493 static void
494 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
495 {
496 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
497 	struct drm_device *dev = intel_dig_port->base.base.dev;
498 	struct drm_i915_private *dev_priv = dev->dev_private;
499 	enum port port = intel_dig_port->port;
500 
501 	lockdep_assert_held(&dev_priv->pps_mutex);
502 
503 	/* try to find a pipe with this port selected */
504 	/* first pick one where the panel is on */
505 	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
506 						  vlv_pipe_has_pp_on);
507 	/* didn't find one? pick one where vdd is on */
508 	if (intel_dp->pps_pipe == INVALID_PIPE)
509 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
510 							  vlv_pipe_has_vdd_on);
511 	/* didn't find one? pick one with just the correct port */
512 	if (intel_dp->pps_pipe == INVALID_PIPE)
513 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
514 							  vlv_pipe_any);
515 
516 	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
517 	if (intel_dp->pps_pipe == INVALID_PIPE) {
518 		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
519 			      port_name(port));
520 		return;
521 	}
522 
523 	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
524 		      port_name(port), pipe_name(intel_dp->pps_pipe));
525 
526 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
527 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
528 }
529 
530 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
531 {
532 	struct drm_device *dev = dev_priv->dev;
533 	struct intel_encoder *encoder;
534 
535 	if (WARN_ON(!IS_VALLEYVIEW(dev)))
536 		return;
537 
538 	/*
539 	 * We can't grab pps_mutex here due to a deadlock with the power_domain
540 	 * mutex when power_domain functions are called while holding pps_mutex.
541 	 * That also means that in order to use pps_pipe the code needs to
542 	 * hold both a power domain reference and pps_mutex, and the power domain
543 	 * reference get/put must be done while _not_ holding pps_mutex.
544 	 * pps_{lock,unlock}() do these steps in the correct order, so they
545 	 * should always be used.
546 	 */
547 
548 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
549 		struct intel_dp *intel_dp;
550 
551 		if (encoder->type != INTEL_OUTPUT_EDP)
552 			continue;
553 
554 		intel_dp = enc_to_intel_dp(&encoder->base);
555 		intel_dp->pps_pipe = INVALID_PIPE;
556 	}
557 }
558 
559 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
560 {
561 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
562 
563 	if (HAS_PCH_SPLIT(dev))
564 		return PCH_PP_CONTROL;
565 	else
566 		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
567 }
568 
569 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
570 {
571 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
572 
573 	if (HAS_PCH_SPLIT(dev))
574 		return PCH_PP_STATUS;
575 	else
576 		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
577 }
578 
579 /* Reboot notifier handler to shut down panel power and guarantee T12 timing.
580    This function is only applicable when the panel PM state is not tracked. */
581 #if 0
582 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
583 			      void *unused)
584 {
585 	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
586 						 edp_notifier);
587 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
588 	struct drm_i915_private *dev_priv = dev->dev_private;
589 	u32 pp_div;
590 	u32 pp_ctrl_reg, pp_div_reg;
591 
592 	if (!is_edp(intel_dp) || code != SYS_RESTART)
593 		return 0;
594 
595 	pps_lock(intel_dp);
596 
597 	if (IS_VALLEYVIEW(dev)) {
598 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
599 
600 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
601 		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
602 		pp_div = I915_READ(pp_div_reg);
603 		pp_div &= PP_REFERENCE_DIVIDER_MASK;
604 
605 		/* Writing 0x1F to PP_DIV_REG sets the maximum power cycle delay */
606 		I915_WRITE(pp_div_reg, pp_div | 0x1F);
607 		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
608 		msleep(intel_dp->panel_power_cycle_delay);
609 	}
610 
611 	pps_unlock(intel_dp);
612 
613 	return 0;
614 }
615 #endif
616 
617 static bool edp_have_panel_power(struct intel_dp *intel_dp)
618 {
619 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
620 	struct drm_i915_private *dev_priv = dev->dev_private;
621 
622 	lockdep_assert_held(&dev_priv->pps_mutex);
623 
624 	if (IS_VALLEYVIEW(dev) &&
625 	    intel_dp->pps_pipe == INVALID_PIPE)
626 		return false;
627 
628 	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
629 }
630 
631 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
632 {
633 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
634 	struct drm_i915_private *dev_priv = dev->dev_private;
635 
636 	lockdep_assert_held(&dev_priv->pps_mutex);
637 
638 	if (IS_VALLEYVIEW(dev) &&
639 	    intel_dp->pps_pipe == INVALID_PIPE)
640 		return false;
641 
642 	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
643 }
644 
645 static void
646 intel_dp_check_edp(struct intel_dp *intel_dp)
647 {
648 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
649 	struct drm_i915_private *dev_priv = dev->dev_private;
650 
651 	if (!is_edp(intel_dp))
652 		return;
653 
654 	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
655 		WARN(1, "eDP powered off while attempting aux channel communication.\n");
656 		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
657 			      I915_READ(_pp_stat_reg(intel_dp)),
658 			      I915_READ(_pp_ctrl_reg(intel_dp)));
659 	}
660 }
661 
662 static uint32_t
663 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
664 {
665 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
666 	struct drm_device *dev = intel_dig_port->base.base.dev;
667 	struct drm_i915_private *dev_priv = dev->dev_private;
668 	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
669 	uint32_t status;
670 	bool done;
671 
672 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
673 	if (has_aux_irq)
674 		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
675 					  msecs_to_jiffies_timeout(10));
676 	else
677 		done = wait_for_atomic(C, 10) == 0;
678 	if (!done)
679 		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
680 			  has_aux_irq);
681 #undef C
682 
683 	return status;
684 }
685 
686 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
687 {
688 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689 	struct drm_device *dev = intel_dig_port->base.base.dev;
690 
691 	/*
692 	 * The clock divider is based on the hrawclk and should run at
693 	 * 2MHz.  So take the hrawclk value (in MHz) and divide it by 2.
694 	 */
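	/* e.g. a 200MHz hrawclk gives a divider of 100: 200MHz / 100 == 2MHz. */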
695 	return index ? 0 : intel_hrawclk(dev) / 2;
696 }
697 
698 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
699 {
700 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
701 	struct drm_device *dev = intel_dig_port->base.base.dev;
702 
703 	if (index)
704 		return 0;
705 
706 	if (intel_dig_port->port == PORT_A) {
707 		if (IS_GEN6(dev) || IS_GEN7(dev))
708 			return 200; /* SNB & IVB eDP input clock at 400MHz */
709 		else
710 			return 225; /* eDP input clock at 450MHz */
711 	} else {
712 		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
713 	}
714 }
715 
716 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
717 {
718 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
719 	struct drm_device *dev = intel_dig_port->base.base.dev;
720 	struct drm_i915_private *dev_priv = dev->dev_private;
721 
722 	if (intel_dig_port->port == PORT_A) {
723 		if (index)
724 			return 0;
725 		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
726 	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
727 		/* Workaround for non-ULT HSW */
728 		switch (index) {
729 		case 0: return 63;
730 		case 1: return 72;
731 		default: return 0;
732 		}
733 	} else  {
734 		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
735 	}
736 }
737 
738 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
739 {
740 	return index ? 0 : 100;
741 }
742 
743 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
744 {
745 	/*
746 	 * SKL doesn't need us to program the AUX clock divider (Hardware will
747 	 * derive the clock from CDCLK automatically). We still implement the
748 	 * get_aux_clock_divider vfunc to plug into the existing code.
749 	 */
750 	return index ? 0 : 1;
751 }
752 
753 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
754 				      bool has_aux_irq,
755 				      int send_bytes,
756 				      uint32_t aux_clock_divider)
757 {
758 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
759 	struct drm_device *dev = intel_dig_port->base.base.dev;
760 	uint32_t precharge, timeout;
761 
762 	if (IS_GEN6(dev))
763 		precharge = 3;
764 	else
765 		precharge = 5;
766 
767 	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
768 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
769 	else
770 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
771 
772 	return DP_AUX_CH_CTL_SEND_BUSY |
773 	       DP_AUX_CH_CTL_DONE |
774 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
775 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
776 	       timeout |
777 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
778 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
779 	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
780 	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
781 }
782 
783 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
784 				      bool has_aux_irq,
785 				      int send_bytes,
786 				      uint32_t unused)
787 {
788 	return DP_AUX_CH_CTL_SEND_BUSY |
789 	       DP_AUX_CH_CTL_DONE |
790 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
791 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
792 	       DP_AUX_CH_CTL_TIME_OUT_1600us |
793 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
794 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
795 	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
796 }
797 
798 static int
799 intel_dp_aux_ch(struct intel_dp *intel_dp,
800 		const uint8_t *send, int send_bytes,
801 		uint8_t *recv, int recv_size)
802 {
803 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
804 	struct drm_device *dev = intel_dig_port->base.base.dev;
805 	struct drm_i915_private *dev_priv = dev->dev_private;
806 	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
807 	uint32_t ch_data = ch_ctl + 4;
808 	uint32_t aux_clock_divider;
809 	int i, ret, recv_bytes;
810 	uint32_t status;
811 	int try, clock = 0;
812 	bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
813 	bool vdd;
814 
815 	pps_lock(intel_dp);
816 
817 	/*
818 	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
819 	 * In such cases we want to leave VDD enabled; it's up to the upper layers
820 	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
821 	 * ourselves.
822 	 */
823 	vdd = edp_panel_vdd_on(intel_dp);
824 
825 	/* DP AUX is extremely sensitive to IRQ latency, hence request the
826 	 * lowest possible wakeup latency to prevent the CPU from going into
827 	 * deep sleep states.
828 	 */
829 	pm_qos_update_request(&dev_priv->pm_qos, 0);
830 
831 	intel_dp_check_edp(intel_dp);
832 
833 	intel_aux_display_runtime_get(dev_priv);
834 
835 	/* Try to wait for any previous AUX channel activity */
836 	for (try = 0; try < 3; try++) {
837 		status = I915_READ_NOTRACE(ch_ctl);
838 		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
839 			break;
840 		msleep(1);
841 	}
842 
843 	if (try == 3) {
844 		WARN(1, "dp_aux_ch not started status 0x%08x\n",
845 		     I915_READ(ch_ctl));
846 		ret = -EBUSY;
847 		goto out;
848 	}
849 
850 	/* Only 5 data registers! */
851 	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
852 		ret = -E2BIG;
853 		goto out;
854 	}
855 
856 	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
857 		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
858 							  has_aux_irq,
859 							  send_bytes,
860 							  aux_clock_divider);
861 
862 		/* The DP spec requires at least 3 retries; we allow up to 5 */
863 		for (try = 0; try < 5; try++) {
864 			/* Load the send data into the aux channel data registers */
865 			for (i = 0; i < send_bytes; i += 4)
866 				I915_WRITE(ch_data + i,
867 					   intel_dp_pack_aux(send + i,
868 							     send_bytes - i));
869 
870 			/* Send the command and wait for it to complete */
871 			I915_WRITE(ch_ctl, send_ctl);
872 
873 			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
874 
875 			/* Clear done status and any errors */
876 			I915_WRITE(ch_ctl,
877 				   status |
878 				   DP_AUX_CH_CTL_DONE |
879 				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
880 				   DP_AUX_CH_CTL_RECEIVE_ERROR);
881 
882 			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
883 				      DP_AUX_CH_CTL_RECEIVE_ERROR))
884 				continue;
885 			if (status & DP_AUX_CH_CTL_DONE)
886 				goto done;
887 		}
888 	}
889 
890 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
891 		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
892 		ret = -EBUSY;
893 		goto out;
894 	}
895 
896 done:
897 	/* Check for timeout or receive error.
898 	 * Timeouts occur when the sink is not connected
899 	 */
900 	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
901 		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
902 		ret = -EIO;
903 		goto out;
904 	}
905 
906 	/* Timeouts occur when the device isn't connected, so they're
907 	 * "normal" -- don't fill the kernel log with these */
908 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
909 		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
910 		ret = -ETIMEDOUT;
911 		goto out;
912 	}
913 
914 	/* Unload any bytes sent back from the other side */
915 	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
916 		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
917 	if (recv_bytes > recv_size)
918 		recv_bytes = recv_size;
919 
920 	for (i = 0; i < recv_bytes; i += 4)
921 		intel_dp_unpack_aux(I915_READ(ch_data + i),
922 				    recv + i, recv_bytes - i);
923 
924 	ret = recv_bytes;
925 out:
926 	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
927 	intel_aux_display_runtime_put(dev_priv);
928 
929 	if (vdd)
930 		edp_panel_vdd_off(intel_dp, false);
931 
932 	pps_unlock(intel_dp);
933 
934 	return ret;
935 }
936 
937 #define BARE_ADDRESS_SIZE	3
938 #define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
939 static ssize_t
940 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
941 {
942 	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
943 	uint8_t txbuf[20], rxbuf[20];
944 	size_t txsize, rxsize;
945 	int ret;
946 
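	/*
	 * Build the 4-byte AUX message header below: the request nibble and
	 * address bits 19:16 go in the first byte, followed by address bits
	 * 15:8, address bits 7:0, and the transfer length minus one.
	 */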
947 	txbuf[0] = (msg->request << 4) |
948 		((msg->address >> 16) & 0xf);
949 	txbuf[1] = (msg->address >> 8) & 0xff;
950 	txbuf[2] = msg->address & 0xff;
951 	txbuf[3] = msg->size - 1;
952 
953 	switch (msg->request & ~DP_AUX_I2C_MOT) {
954 	case DP_AUX_NATIVE_WRITE:
955 	case DP_AUX_I2C_WRITE:
956 		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
957 		rxsize = 2; /* 0 or 1 data bytes */
958 
959 		if (WARN_ON(txsize > 20))
960 			return -E2BIG;
961 
962 		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
963 
964 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
965 		if (ret > 0) {
966 			msg->reply = rxbuf[0] >> 4;
967 
968 			if (ret > 1) {
969 				/* Number of bytes written in a short write. */
970 				ret = clamp_t(int, rxbuf[1], 0, msg->size);
971 			} else {
972 				/* Return payload size. */
973 				ret = msg->size;
974 			}
975 		}
976 		break;
977 
978 	case DP_AUX_NATIVE_READ:
979 	case DP_AUX_I2C_READ:
980 		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
981 		rxsize = msg->size + 1;
982 
983 		if (WARN_ON(rxsize > 20))
984 			return -E2BIG;
985 
986 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
987 		if (ret > 0) {
988 			msg->reply = rxbuf[0] >> 4;
989 			/*
990 			 * Assume happy day, and copy the data. The caller is
991 			 * expected to check msg->reply before touching it.
992 			 *
993 			 * Return payload size.
994 			 */
995 			ret--;
996 			memcpy(msg->buffer, rxbuf + 1, ret);
997 		}
998 		break;
999 
1000 	default:
1001 		ret = -EINVAL;
1002 		break;
1003 	}
1004 
1005 	return ret;
1006 }
1007 
1008 static int
1009 intel_dp_i2c_aux_ch(struct device *adapter, int mode,
1010 		    uint8_t write_byte, uint8_t *read_byte)
1011 {
1012 	struct i2c_algo_dp_aux_data *data = device_get_softc(adapter);
1013 	struct intel_dp *intel_dp = data->priv;
1014 	uint16_t address = data->address;
1015 	uint8_t msg[5];
1016 	uint8_t reply[2];
1017 	unsigned retry;
1018 	int msg_bytes;
1019 	int reply_bytes;
1020 	int ret;
1021 
1022 	intel_edp_panel_vdd_on(intel_dp);
1023 	intel_dp_check_edp(intel_dp);
1024 	/* Set up the command byte */
1025 	if (mode & MODE_I2C_READ)
1026 		msg[0] = DP_AUX_I2C_READ << 4;
1027 	else
1028 		msg[0] = DP_AUX_I2C_WRITE << 4;
1029 
1030 	if (!(mode & MODE_I2C_STOP))
1031 		msg[0] |= DP_AUX_I2C_MOT << 4;
1032 
1033 	msg[1] = address >> 8;
1034 	msg[2] = address;
1035 
1036 	switch (mode) {
1037 	case MODE_I2C_WRITE:
1038 		msg[3] = 0;
1039 		msg[4] = write_byte;
1040 		msg_bytes = 5;
1041 		reply_bytes = 1;
1042 		break;
1043 	case MODE_I2C_READ:
1044 		msg[3] = 0;
1045 		msg_bytes = 4;
1046 		reply_bytes = 2;
1047 		break;
1048 	default:
1049 		msg_bytes = 3;
1050 		reply_bytes = 1;
1051 		break;
1052 	}
1053 
1054 	/*
1055 	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
1056 	 * required to retry at least seven times upon receiving AUX_DEFER
1057 	 * before giving up the AUX transaction.
1058 	 */
1059 	for (retry = 0; retry < 7; retry++) {
1060 		ret = intel_dp_aux_ch(intel_dp,
1061 				      msg, msg_bytes,
1062 				      reply, reply_bytes);
1063 		if (ret < 0) {
1064 			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
1065 			goto out;
1066 		}
1067 
1068 		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
1069 		case DP_AUX_NATIVE_REPLY_ACK:
1070 			/* I2C-over-AUX Reply field is only valid
1071 			 * when paired with AUX ACK.
1072 			 */
1073 			break;
1074 		case DP_AUX_NATIVE_REPLY_NACK:
1075 			DRM_DEBUG_KMS("aux_ch native nack\n");
1076 			ret = -EREMOTEIO;
1077 			goto out;
1078 		case DP_AUX_NATIVE_REPLY_DEFER:
1079 			/*
1080 			 * For now, just give more slack to branch devices. We
1081 			 * could check the DPCD for I2C bit rate capabilities,
1082 			 * and if available, adjust the interval. We could also
1083 			 * be more careful with DP-to-Legacy adapters where a
1084 			 * long legacy cable may force very low I2C bit rates.
1085 			 */
1086 			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
1087 			    DP_DWN_STRM_PORT_PRESENT)
1088 				usleep_range(500, 600);
1089 			else
1090 				usleep_range(300, 400);
1091 			continue;
1092 		default:
1093 			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
1094 				  reply[0]);
1095 			ret = -EREMOTEIO;
1096 			goto out;
1097 		}
1098 
1099 		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
1100 		case DP_AUX_I2C_REPLY_ACK:
1101 			if (mode == MODE_I2C_READ) {
1102 				*read_byte = reply[1];
1103 			}
1104 			ret = 0;	/* reply_bytes - 1 */
1105 			goto out;
1106 		case DP_AUX_I2C_REPLY_NACK:
1107 			DRM_DEBUG_KMS("aux_i2c nack\n");
1108 			ret = -EREMOTEIO;
1109 			goto out;
1110 		case DP_AUX_I2C_REPLY_DEFER:
1111 			DRM_DEBUG_KMS("aux_i2c defer\n");
1112 			udelay(100);
1113 			break;
1114 		default:
1115 			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
1116 			ret = -EREMOTEIO;
1117 			goto out;
1118 		}
1119 	}
1120 
1121 	DRM_ERROR("too many retries, giving up\n");
1122 	ret = -EREMOTEIO;
1123 
1124 out:
1125 	return ret;
1126 }
1127 
1128 static void
1129 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1130 {
1131 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1132 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1133 	enum port port = intel_dig_port->port;
1134 	const char *name = NULL;
1135 	int ret;
1136 
1137 	switch (port) {
1138 	case PORT_A:
1139 		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1140 		name = "DPDDC-A";
1141 		break;
1142 	case PORT_B:
1143 		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1144 		name = "DPDDC-B";
1145 		break;
1146 	case PORT_C:
1147 		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1148 		name = "DPDDC-C";
1149 		break;
1150 	case PORT_D:
1151 		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1152 		name = "DPDDC-D";
1153 		break;
1154 	default:
1155 		BUG();
1156 	}
1157 
1158 	/*
1159 	 * The AUX_CTL register is usually DP_CTL + 0x10.
1160 	 *
1161 	 * On Haswell and Broadwell though:
1162 	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1163 	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1164 	 *
1165 	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1166 	 */
1167 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
1168 		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1169 
1170 	intel_dp->aux.name = name;
1171 	intel_dp->aux.dev = dev->dev;
1172 	intel_dp->aux.transfer = intel_dp_aux_transfer;
1173 
1174 	DRM_DEBUG_KMS("i2c_init %s\n", name);
1175 	ret = iic_dp_aux_add_bus(connector->base.dev->dev, name,
1176 	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
1177 	    &intel_dp->aux.ddc);
1178 	WARN(ret, "intel_dp_i2c_init failed with error %d for port %c\n",
1179 	     ret, port_name(port));
1180 
1181 }
1182 
1183 static void
1184 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1185 {
1186 	intel_connector_unregister(intel_connector);
1187 }
1188 
1189 #if 0
1190 static int
1191 intel_dp_i2c_init(struct intel_dp *intel_dp,
1192 		  struct intel_connector *intel_connector, const char *name)
1193 {
1194 	int	ret;
1195 
1196 	DRM_DEBUG_KMS("i2c_init %s\n", name);
1197 #if 0
1198 	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
1199 	intel_dp->adapter.owner = THIS_MODULE;
1200 	intel_dp->adapter.class = I2C_CLASS_DDC;
1201 	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
1202 	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
1203 	intel_dp->adapter.algo_data = &intel_dp->algo;
1204 	intel_dp->adapter.dev.parent = intel_connector->base.dev->dev;
1205 
1206 	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
1207 	if (ret < 0)
1208 		return ret;
1209 
1210 	ret = sysfs_create_link(&intel_connector->base.kdev->kobj,
1211 				&intel_dp->adapter.dev.kobj,
1212 				intel_dp->adapter.dev.kobj.name);
1213 #endif
1214 	ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
1215 	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
1216 	    &intel_dp->adapter);
1217 
1218 	return ret;
1219 }
1220 #endif
1221 
1222 static void
1223 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
1224 {
1225 	u32 ctrl1;
1226 
1227 	pipe_config->ddi_pll_sel = SKL_DPLL0;
1228 	pipe_config->dpll_hw_state.cfgcr1 = 0;
1229 	pipe_config->dpll_hw_state.cfgcr2 = 0;
1230 
1231 	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1232 	switch (link_clock / 2) {
1233 	case 81000:
1234 		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1235 					      SKL_DPLL0);
1236 		break;
1237 	case 135000:
1238 		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1239 					      SKL_DPLL0);
1240 		break;
1241 	case 270000:
1242 		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1243 					      SKL_DPLL0);
1244 		break;
1245 	case 162000:
1246 		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1247 					      SKL_DPLL0);
1248 		break;
1249 	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
1250 	 * results in a CDCLK change. Need to handle the CDCLK change by
1251 	 * disabling pipes and re-enabling them. */
1252 	case 108000:
1253 		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1254 					      SKL_DPLL0);
1255 		break;
1256 	case 216000:
1257 		ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1258 					      SKL_DPLL0);
1259 		break;
1260 
1261 	}
1262 	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1263 }
1264 
1265 static void
1266 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
1267 {
1268 	switch (link_bw) {
1269 	case DP_LINK_BW_1_62:
1270 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1271 		break;
1272 	case DP_LINK_BW_2_7:
1273 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1274 		break;
1275 	case DP_LINK_BW_5_4:
1276 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1277 		break;
1278 	}
1279 }
1280 
1281 static int
1282 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1283 {
1284 	if (intel_dp->num_sink_rates) {
1285 		*sink_rates = intel_dp->sink_rates;
1286 		return intel_dp->num_sink_rates;
1287 	}
1288 
1289 	*sink_rates = default_rates;
1290 
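	/*
	 * The link BW codes are 0x06, 0x0a and 0x14, so shifting right by 3
	 * maps them to 0, 1 and 2; adding one gives how many entries of
	 * default_rates are usable at that maximum link BW.
	 */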
1291 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1292 }
1293 
1294 static int
1295 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1296 {
1297 	if (INTEL_INFO(dev)->gen >= 9) {
1298 		*source_rates = gen9_rates;
1299 		return ARRAY_SIZE(gen9_rates);
1300 	} else if (IS_CHERRYVIEW(dev)) {
1301 		*source_rates = chv_rates;
1302 		return ARRAY_SIZE(chv_rates);
1303 	}
1304 
1305 	*source_rates = default_rates;
1306 
1307 	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1308 		/* WaDisableHBR2:skl */
1309 		return (DP_LINK_BW_2_7 >> 3) + 1;
1310 	else if (INTEL_INFO(dev)->gen >= 8 ||
1311 	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1312 		return (DP_LINK_BW_5_4 >> 3) + 1;
1313 	else
1314 		return (DP_LINK_BW_2_7 >> 3) + 1;
1315 }
1316 
1317 static void
1318 intel_dp_set_clock(struct intel_encoder *encoder,
1319 		   struct intel_crtc_state *pipe_config, int link_bw)
1320 {
1321 	struct drm_device *dev = encoder->base.dev;
1322 	const struct dp_link_dpll *divisor = NULL;
1323 	int i, count = 0;
1324 
1325 	if (IS_G4X(dev)) {
1326 		divisor = gen4_dpll;
1327 		count = ARRAY_SIZE(gen4_dpll);
1328 	} else if (HAS_PCH_SPLIT(dev)) {
1329 		divisor = pch_dpll;
1330 		count = ARRAY_SIZE(pch_dpll);
1331 	} else if (IS_CHERRYVIEW(dev)) {
1332 		divisor = chv_dpll;
1333 		count = ARRAY_SIZE(chv_dpll);
1334 	} else if (IS_VALLEYVIEW(dev)) {
1335 		divisor = vlv_dpll;
1336 		count = ARRAY_SIZE(vlv_dpll);
1337 	}
1338 
1339 	if (divisor && count) {
1340 		for (i = 0; i < count; i++) {
1341 			if (link_bw == divisor[i].link_bw) {
1342 				pipe_config->dpll = divisor[i].dpll;
1343 				pipe_config->clock_set = true;
1344 				break;
1345 			}
1346 		}
1347 	}
1348 }
1349 
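/*
 * Merge-walk of two rate arrays, both of which must be sorted in ascending
 * order; writes at most DP_MAX_SUPPORTED_RATES common entries and returns
 * how many were found.
 */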
1350 static int intersect_rates(const int *source_rates, int source_len,
1351 			   const int *sink_rates, int sink_len,
1352 			   int *common_rates)
1353 {
1354 	int i = 0, j = 0, k = 0;
1355 
1356 	while (i < source_len && j < sink_len) {
1357 		if (source_rates[i] == sink_rates[j]) {
1358 			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1359 				return k;
1360 			common_rates[k] = source_rates[i];
1361 			++k;
1362 			++i;
1363 			++j;
1364 		} else if (source_rates[i] < sink_rates[j]) {
1365 			++i;
1366 		} else {
1367 			++j;
1368 		}
1369 	}
1370 	return k;
1371 }
1372 
1373 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1374 				 int *common_rates)
1375 {
1376 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1377 	const int *source_rates, *sink_rates;
1378 	int source_len, sink_len;
1379 
1380 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1381 	source_len = intel_dp_source_rates(dev, &source_rates);
1382 
1383 	return intersect_rates(source_rates, source_len,
1384 			       sink_rates, sink_len,
1385 			       common_rates);
1386 }
1387 
1388 static void snprintf_int_array(char *str, size_t len,
1389 			       const int *array, int nelem)
1390 {
1391 	int i;
1392 
1393 	str[0] = '\0';
1394 
1395 	for (i = 0; i < nelem; i++) {
1396 		int r = ksnprintf(str, len, "%d,", array[i]);
1397 		if (r >= len)
1398 			return;
1399 		str += r;
1400 		len -= r;
1401 	}
1402 }
1403 
1404 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1405 {
1406 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1407 	const int *source_rates, *sink_rates;
1408 	int source_len, sink_len, common_len;
1409 	int common_rates[DP_MAX_SUPPORTED_RATES];
1410 	char str[128]; /* FIXME: too big for stack? */
1411 
1412 	if ((drm_debug & DRM_UT_KMS) == 0)
1413 		return;
1414 
1415 	source_len = intel_dp_source_rates(dev, &source_rates);
1416 	snprintf_int_array(str, sizeof(str), source_rates, source_len);
1417 	DRM_DEBUG_KMS("source rates: %s\n", str);
1418 
1419 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1420 	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1421 	DRM_DEBUG_KMS("sink rates: %s\n", str);
1422 
1423 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1424 	snprintf_int_array(str, sizeof(str), common_rates, common_len);
1425 	DRM_DEBUG_KMS("common rates: %s\n", str);
1426 }
1427 
1428 static int rate_to_index(int find, const int *rates)
1429 {
1430 	int i = 0;
1431 
1432 	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1433 		if (find == rates[i])
1434 			break;
1435 
1436 	return i;
1437 }
1438 
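/*
 * Unused slots in a rates[] array are zero-initialized, so
 * rate_to_index(0, rates) yields the number of valid entries, and the
 * highest common rate sits in the slot just before it.
 */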
1439 int
1440 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1441 {
1442 	int rates[DP_MAX_SUPPORTED_RATES] = {};
1443 	int len;
1444 
1445 	len = intel_dp_common_rates(intel_dp, rates);
1446 	if (WARN_ON(len <= 0))
1447 		return 162000;
1448 
1449 	return rates[rate_to_index(0, rates) - 1];
1450 }
1451 
1452 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1453 {
1454 	return rate_to_index(rate, intel_dp->sink_rates);
1455 }
1456 
1457 bool
1458 intel_dp_compute_config(struct intel_encoder *encoder,
1459 			struct intel_crtc_state *pipe_config)
1460 {
1461 	struct drm_device *dev = encoder->base.dev;
1462 	struct drm_i915_private *dev_priv = dev->dev_private;
1463 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1464 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1465 	enum port port = dp_to_dig_port(intel_dp)->port;
1466 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1467 	struct intel_connector *intel_connector = intel_dp->attached_connector;
1468 	int lane_count, clock;
1469 	int min_lane_count = 1;
1470 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1471 	/* Conveniently, the link BW constants become indices with a shift... */
1472 	int min_clock = 0;
1473 	int max_clock;
1474 	int bpp, mode_rate;
1475 	int link_avail, link_clock;
1476 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1477 	int common_len;
1478 
1479 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1480 
1481 	/* No common link rates between source and sink */
1482 	WARN_ON(common_len <= 0);
1483 
1484 	max_clock = common_len - 1;
1485 
1486 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1487 		pipe_config->has_pch_encoder = true;
1488 
1489 	pipe_config->has_dp_encoder = true;
1490 	pipe_config->has_drrs = false;
1491 	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1492 
1493 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1494 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1495 				       adjusted_mode);
1496 		if (!HAS_PCH_SPLIT(dev))
1497 			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1498 						 intel_connector->panel.fitting_mode);
1499 		else
1500 			intel_pch_panel_fitting(intel_crtc, pipe_config,
1501 						intel_connector->panel.fitting_mode);
1502 	}
1503 
1504 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1505 		return false;
1506 
1507 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1508 		      "max bw %d pixel clock %iKHz\n",
1509 		      max_lane_count, common_rates[max_clock],
1510 		      adjusted_mode->crtc_clock);
1511 
1512 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1513 	 * bpc in between. */
1514 	bpp = pipe_config->pipe_bpp;
1515 	if (is_edp(intel_dp)) {
1516 		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1517 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1518 				      dev_priv->vbt.edp_bpp);
1519 			bpp = dev_priv->vbt.edp_bpp;
1520 		}
1521 
1522 		/*
1523 		 * Use the maximum clock and number of lanes the eDP panel
1524 		 * advertises being capable of. The panels are generally
1525 		 * designed to support only a single clock and lane
1526 		 * configuration, and typically these values correspond to the
1527 		 * native resolution of the panel.
1528 		 */
1529 		min_lane_count = max_lane_count;
1530 		min_clock = max_clock;
1531 	}
1532 
1533 	for (; bpp >= 6*3; bpp -= 2*3) {
1534 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1535 						   bpp);
1536 
1537 		for (clock = min_clock; clock <= max_clock; clock++) {
1538 			for (lane_count = min_lane_count;
1539 				lane_count <= max_lane_count;
1540 				lane_count <<= 1) {
1541 
1542 				link_clock = common_rates[clock];
1543 				link_avail = intel_dp_max_data_rate(link_clock,
1544 								    lane_count);
1545 
1546 				if (mode_rate <= link_avail) {
1547 					goto found;
1548 				}
1549 			}
1550 		}
1551 	}
1552 
1553 	return false;
1554 
1555 found:
1556 	if (intel_dp->color_range_auto) {
1557 		/*
1558 		 * See:
1559 		 * CEA-861-E - 5.1 Default Encoding Parameters
1560 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1561 		 */
1562 		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
1563 			intel_dp->color_range = DP_COLOR_RANGE_16_235;
1564 		else
1565 			intel_dp->color_range = 0;
1566 	}
1567 
1568 	if (intel_dp->color_range)
1569 		pipe_config->limited_color_range = true;
1570 
1571 	intel_dp->lane_count = lane_count;
1572 
1573 	if (intel_dp->num_sink_rates) {
1574 		intel_dp->link_bw = 0;
1575 		intel_dp->rate_select =
1576 			intel_dp_rate_select(intel_dp, common_rates[clock]);
1577 	} else {
1578 		intel_dp->link_bw =
1579 			drm_dp_link_rate_to_bw_code(common_rates[clock]);
1580 		intel_dp->rate_select = 0;
1581 	}
1582 
1583 	pipe_config->pipe_bpp = bpp;
1584 	pipe_config->port_clock = common_rates[clock];
1585 
1586 	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1587 		      intel_dp->link_bw, intel_dp->lane_count,
1588 		      pipe_config->port_clock, bpp);
1589 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1590 		      mode_rate, link_avail);
1591 
1592 	intel_link_compute_m_n(bpp, lane_count,
1593 			       adjusted_mode->crtc_clock,
1594 			       pipe_config->port_clock,
1595 			       &pipe_config->dp_m_n);
1596 
1597 	if (intel_connector->panel.downclock_mode != NULL &&
1598 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1599 			pipe_config->has_drrs = true;
1600 			intel_link_compute_m_n(bpp, lane_count,
1601 				intel_connector->panel.downclock_mode->clock,
1602 				pipe_config->port_clock,
1603 				&pipe_config->dp_m2_n2);
1604 	}
1605 
1606 	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1607 		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
1608 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1609 		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1610 	else
1611 		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1612 
1613 	return true;
1614 }
1615 
1616 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1617 {
1618 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1619 	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1620 	struct drm_device *dev = crtc->base.dev;
1621 	struct drm_i915_private *dev_priv = dev->dev_private;
1622 	u32 dpa_ctl;
1623 
1624 	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1625 		      crtc->config->port_clock);
1626 	dpa_ctl = I915_READ(DP_A);
1627 	dpa_ctl &= ~DP_PLL_FREQ_MASK;
1628 
1629 	if (crtc->config->port_clock == 162000) {
1630 		/* For a long time we've carried around an ILK-DevA w/a for the
1631 		 * 160MHz clock. If we're really unlucky, it's still required.
1632 		 */
1633 		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1634 		dpa_ctl |= DP_PLL_FREQ_160MHZ;
1635 		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1636 	} else {
1637 		dpa_ctl |= DP_PLL_FREQ_270MHZ;
1638 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1639 	}
1640 
1641 	I915_WRITE(DP_A, dpa_ctl);
1642 
1643 	POSTING_READ(DP_A);
1644 	udelay(500);
1645 }
1646 
1647 static void intel_dp_prepare(struct intel_encoder *encoder)
1648 {
1649 	struct drm_device *dev = encoder->base.dev;
1650 	struct drm_i915_private *dev_priv = dev->dev_private;
1651 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1652 	enum port port = dp_to_dig_port(intel_dp)->port;
1653 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1654 	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1655 
1656 	/*
1657 	 * There are four kinds of DP registers:
1658 	 *
1659 	 * 	IBX PCH
1660 	 * 	SNB CPU
1661 	 *	IVB CPU
1662 	 * 	CPT PCH
1663 	 *
1664 	 * IBX PCH and CPU are the same for almost everything,
1665 	 * except that the CPU DP PLL is configured in this
1666 	 * register
1667 	 *
1668 	 * CPT PCH is quite different, having many bits moved
1669 	 * to the TRANS_DP_CTL register instead. That
1670 	 * configuration happens (oddly) in ironlake_pch_enable
1671 	 */
1672 
1673 	/* Preserve the BIOS-computed detected bit. This is
1674 	 * supposed to be read-only.
1675 	 */
1676 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1677 
1678 	/* Handle DP bits in common between all three register formats */
1679 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1680 	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1681 
1682 	if (crtc->config->has_audio)
1683 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1684 
1685 	/* Split out the IBX/CPU vs CPT settings */
1686 
1687 	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1688 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1689 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1690 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1691 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1692 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1693 
1694 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1695 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1696 
1697 		intel_dp->DP |= crtc->pipe << 29;
1698 	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1699 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1700 			intel_dp->DP |= intel_dp->color_range;
1701 
1702 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1703 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1704 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1705 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1706 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
1707 
1708 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1709 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1710 
1711 		if (!IS_CHERRYVIEW(dev)) {
1712 			if (crtc->pipe == 1)
1713 				intel_dp->DP |= DP_PIPEB_SELECT;
1714 		} else {
1715 			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1716 		}
1717 	} else {
1718 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1719 	}
1720 }
1721 
1722 #define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1723 #define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1724 
1725 #define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1726 #define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1727 
1728 #define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1729 #define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
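/*
 * Mask/value pairs for wait_panel_status(): "on" waits for PP_ON with the
 * power sequencer idle, "off" waits for PP_ON to clear, and "cycle"
 * additionally waits for the power cycle delay to finish.
 */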
1730 
1731 static void wait_panel_status(struct intel_dp *intel_dp,
1732 				       u32 mask,
1733 				       u32 value)
1734 {
1735 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1736 	struct drm_i915_private *dev_priv = dev->dev_private;
1737 	u32 pp_stat_reg, pp_ctrl_reg;
1738 
1739 	lockdep_assert_held(&dev_priv->pps_mutex);
1740 
1741 	pp_stat_reg = _pp_stat_reg(intel_dp);
1742 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1743 
1744 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1745 			mask, value,
1746 			I915_READ(pp_stat_reg),
1747 			I915_READ(pp_ctrl_reg));
1748 
1749 	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1750 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1751 				I915_READ(pp_stat_reg),
1752 				I915_READ(pp_ctrl_reg));
1753 	}
1754 
1755 	DRM_DEBUG_KMS("Wait complete\n");
1756 }
1757 
1758 static void wait_panel_on(struct intel_dp *intel_dp)
1759 {
1760 	DRM_DEBUG_KMS("Wait for panel power on\n");
1761 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1762 }
1763 
1764 static void wait_panel_off(struct intel_dp *intel_dp)
1765 {
1766 	DRM_DEBUG_KMS("Wait for panel power off time\n");
1767 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1768 }
1769 
1770 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1771 {
1772 	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1773 
1774 	/* If the VDD override bit was the last thing we disabled, the hardware
1775 	 * doesn't run the power-cycle delay for us, so do the manual wait. */
1776 	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1777 				       intel_dp->panel_power_cycle_delay);
1778 
1779 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1780 }
1781 
1782 static void wait_backlight_on(struct intel_dp *intel_dp)
1783 {
1784 	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1785 				       intel_dp->backlight_on_delay);
1786 }
1787 
1788 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1789 {
1790 	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1791 				       intel_dp->backlight_off_delay);
1792 }
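/*
 * The delay helpers above rely on wait_remaining_ms_from_jiffies() to
 * enforce the panel delays relative to a recorded timestamp instead of
 * sleeping unconditionally. Conceptually (a rough sketch, not the exact
 * helper implementation):
 *
 *	target = timestamp_jiffies + msecs_to_jiffies(to_wait_ms);
 *	if (time_before(jiffies, target))
 *		sleep until 'target';
 *
 * so a delay that already elapsed while other work ran costs nothing.
 */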
1793 
1794 /* Read the current pp_control value, unlocking the register if it
1795  * is locked
1796  */
1797 
1798 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1799 {
1800 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1801 	struct drm_i915_private *dev_priv = dev->dev_private;
1802 	u32 control;
1803 
1804 	lockdep_assert_held(&dev_priv->pps_mutex);
1805 
1806 	control = I915_READ(_pp_ctrl_reg(intel_dp));
1807 	control &= ~PANEL_UNLOCK_MASK;
1808 	control |= PANEL_UNLOCK_REGS;
1809 	return control;
1810 }
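/*
 * Callers below always fetch the base value via ironlake_get_pp_control()
 * before modifying PP_CONTROL; the pattern used throughout this file is:
 *
 *	pp = ironlake_get_pp_control(intel_dp);
 *	pp |= EDP_FORCE_VDD;
 *	I915_WRITE(_pp_ctrl_reg(intel_dp), pp);
 *	POSTING_READ(_pp_ctrl_reg(intel_dp));
 *
 * which re-applies the unlock key on every update, so a locked register
 * can't silently discard the write.
 */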
1811 
1812 /*
1813  * Must be paired with edp_panel_vdd_off().
1814  * Must hold pps_mutex around the whole on/off sequence.
1815  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1816  */
1817 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1818 {
1819 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1820 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1821 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1822 	struct drm_i915_private *dev_priv = dev->dev_private;
1823 	enum intel_display_power_domain power_domain;
1824 	u32 pp;
1825 	u32 pp_stat_reg, pp_ctrl_reg;
1826 	bool need_to_disable = !intel_dp->want_panel_vdd;
1827 
1828 	lockdep_assert_held(&dev_priv->pps_mutex);
1829 
1830 	if (!is_edp(intel_dp))
1831 		return false;
1832 
1833 	cancel_delayed_work(&intel_dp->panel_vdd_work);
1834 	intel_dp->want_panel_vdd = true;
1835 
1836 	if (edp_have_panel_vdd(intel_dp))
1837 		return need_to_disable;
1838 
1839 	power_domain = intel_display_port_power_domain(intel_encoder);
1840 	intel_display_power_get(dev_priv, power_domain);
1841 
1842 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1843 		      port_name(intel_dig_port->port));
1844 
1845 	if (!edp_have_panel_power(intel_dp))
1846 		wait_panel_power_cycle(intel_dp);
1847 
1848 	pp = ironlake_get_pp_control(intel_dp);
1849 	pp |= EDP_FORCE_VDD;
1850 
1851 	pp_stat_reg = _pp_stat_reg(intel_dp);
1852 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1853 
1854 	I915_WRITE(pp_ctrl_reg, pp);
1855 	POSTING_READ(pp_ctrl_reg);
1856 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1857 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1858 	/*
1859 	 * If the panel wasn't on, delay before accessing aux channel
1860 	 */
1861 	if (!edp_have_panel_power(intel_dp)) {
1862 		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1863 			      port_name(intel_dig_port->port));
1864 		msleep(intel_dp->panel_power_up_delay);
1865 	}
1866 
1867 	return need_to_disable;
1868 }
1869 
1870 /*
1871  * Must be paired with intel_edp_panel_vdd_off() or
1872  * intel_edp_panel_off().
1873  * Nested calls to these functions are not allowed since
1874  * we drop the lock. Caller must use some higher level
1875  * locking to prevent nested calls from other threads.
1876  */
1877 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1878 {
1879 	bool vdd;
1880 
1881 	if (!is_edp(intel_dp))
1882 		return;
1883 
1884 	pps_lock(intel_dp);
1885 	vdd = edp_panel_vdd_on(intel_dp);
1886 	pps_unlock(intel_dp);
1887 
1888 	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1889 	     port_name(dp_to_dig_port(intel_dp)->port));
1890 }
1891 
1892 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1893 {
1894 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1895 	struct drm_i915_private *dev_priv = dev->dev_private;
1896 	struct intel_digital_port *intel_dig_port =
1897 		dp_to_dig_port(intel_dp);
1898 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1899 	enum intel_display_power_domain power_domain;
1900 	u32 pp;
1901 	u32 pp_stat_reg, pp_ctrl_reg;
1902 
1903 	lockdep_assert_held(&dev_priv->pps_mutex);
1904 
1905 	WARN_ON(intel_dp->want_panel_vdd);
1906 
1907 	if (!edp_have_panel_vdd(intel_dp))
1908 		return;
1909 
1910 	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1911 		      port_name(intel_dig_port->port));
1912 
1913 	pp = ironlake_get_pp_control(intel_dp);
1914 	pp &= ~EDP_FORCE_VDD;
1915 
1916 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1917 	pp_stat_reg = _pp_stat_reg(intel_dp);
1918 
1919 	I915_WRITE(pp_ctrl_reg, pp);
1920 	POSTING_READ(pp_ctrl_reg);
1921 
1922 	/* Make sure sequencer is idle before allowing subsequent activity */
1923 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1924 		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1925 
1926 	if ((pp & POWER_TARGET_ON) == 0)
1927 		intel_dp->last_power_cycle = jiffies;
1928 
1929 	power_domain = intel_display_port_power_domain(intel_encoder);
1930 	intel_display_power_put(dev_priv, power_domain);
1931 }
1932 
1933 static void edp_panel_vdd_work(struct work_struct *__work)
1934 {
1935 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1936 						 struct intel_dp, panel_vdd_work);
1937 
1938 	pps_lock(intel_dp);
1939 	if (!intel_dp->want_panel_vdd)
1940 		edp_panel_vdd_off_sync(intel_dp);
1941 	pps_unlock(intel_dp);
1942 }
1943 
1944 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1945 {
1946 	unsigned long delay;
1947 
1948 	/*
1949 	 * Queue the timer to fire a long time from now (relative to the power
1950 	 * down delay) to keep the panel power up across a sequence of
1951 	 * operations.
1952 	 */
1953 	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1954 	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1955 }
1956 
1957 /*
1958  * Must be paired with edp_panel_vdd_on().
1959  * Must hold pps_mutex around the whole on/off sequence.
1960  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1961  */
1962 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1963 {
1964 	struct drm_i915_private *dev_priv =
1965 		intel_dp_to_dev(intel_dp)->dev_private;
1966 
1967 	lockdep_assert_held(&dev_priv->pps_mutex);
1968 
1969 	if (!is_edp(intel_dp))
1970 		return;
1971 
1972 	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1973 	     port_name(dp_to_dig_port(intel_dp)->port));
1974 
1975 	intel_dp->want_panel_vdd = false;
1976 
1977 	if (sync)
1978 		edp_panel_vdd_off_sync(intel_dp);
1979 	else
1980 		edp_panel_vdd_schedule_off(intel_dp);
1981 }
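/*
 * The locked static pair above is used like this (see intel_enable_dp()
 * below for a real instance of this pattern):
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	... work that needs VDD ...
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 */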
1982 
1983 static void edp_panel_on(struct intel_dp *intel_dp)
1984 {
1985 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1986 	struct drm_i915_private *dev_priv = dev->dev_private;
1987 	u32 pp;
1988 	u32 pp_ctrl_reg;
1989 
1990 	lockdep_assert_held(&dev_priv->pps_mutex);
1991 
1992 	if (!is_edp(intel_dp))
1993 		return;
1994 
1995 	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1996 		      port_name(dp_to_dig_port(intel_dp)->port));
1997 
1998 	if (WARN(edp_have_panel_power(intel_dp),
1999 		 "eDP port %c panel power already on\n",
2000 		 port_name(dp_to_dig_port(intel_dp)->port)))
2001 		return;
2002 
2003 	wait_panel_power_cycle(intel_dp);
2004 
2005 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2006 	pp = ironlake_get_pp_control(intel_dp);
2007 	if (IS_GEN5(dev)) {
2008 		/* ILK workaround: disable reset around power sequence */
2009 		pp &= ~PANEL_POWER_RESET;
2010 		I915_WRITE(pp_ctrl_reg, pp);
2011 		POSTING_READ(pp_ctrl_reg);
2012 	}
2013 
2014 	pp |= POWER_TARGET_ON;
2015 	if (!IS_GEN5(dev))
2016 		pp |= PANEL_POWER_RESET;
2017 
2018 	I915_WRITE(pp_ctrl_reg, pp);
2019 	POSTING_READ(pp_ctrl_reg);
2020 
2021 	wait_panel_on(intel_dp);
2022 	intel_dp->last_power_on = jiffies;
2023 
2024 	if (IS_GEN5(dev)) {
2025 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2026 		I915_WRITE(pp_ctrl_reg, pp);
2027 		POSTING_READ(pp_ctrl_reg);
2028 	}
2029 }
2030 
2031 void intel_edp_panel_on(struct intel_dp *intel_dp)
2032 {
2033 	if (!is_edp(intel_dp))
2034 		return;
2035 
2036 	pps_lock(intel_dp);
2037 	edp_panel_on(intel_dp);
2038 	pps_unlock(intel_dp);
2039 }
2040 
2042 static void edp_panel_off(struct intel_dp *intel_dp)
2043 {
2044 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2045 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
2046 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2047 	struct drm_i915_private *dev_priv = dev->dev_private;
2048 	enum intel_display_power_domain power_domain;
2049 	u32 pp;
2050 	u32 pp_ctrl_reg;
2051 
2052 	lockdep_assert_held(&dev_priv->pps_mutex);
2053 
2054 	if (!is_edp(intel_dp))
2055 		return;
2056 
2057 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2058 		      port_name(dp_to_dig_port(intel_dp)->port));
2059 
2060 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2061 	     port_name(dp_to_dig_port(intel_dp)->port));
2062 
2063 	pp = ironlake_get_pp_control(intel_dp);
2064 	/* We need to switch off panel power _and_ force vdd; otherwise some
2065 	 * panels get very unhappy and cease to work. */
2066 	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2067 		EDP_BLC_ENABLE);
2068 
2069 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2070 
2071 	intel_dp->want_panel_vdd = false;
2072 
2073 	I915_WRITE(pp_ctrl_reg, pp);
2074 	POSTING_READ(pp_ctrl_reg);
2075 
2076 	intel_dp->last_power_cycle = jiffies;
2077 	wait_panel_off(intel_dp);
2078 
2079 	/* We got a reference when we enabled the VDD. */
2080 	power_domain = intel_display_port_power_domain(intel_encoder);
2081 	intel_display_power_put(dev_priv, power_domain);
2082 }
2083 
2084 void intel_edp_panel_off(struct intel_dp *intel_dp)
2085 {
2086 	if (!is_edp(intel_dp))
2087 		return;
2088 
2089 	pps_lock(intel_dp);
2090 	edp_panel_off(intel_dp);
2091 	pps_unlock(intel_dp);
2092 }
2093 
2094 /* Enable backlight in the panel power control. */
2095 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2096 {
2097 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2098 	struct drm_device *dev = intel_dig_port->base.base.dev;
2099 	struct drm_i915_private *dev_priv = dev->dev_private;
2100 	u32 pp;
2101 	u32 pp_ctrl_reg;
2102 
2103 	/*
2104 	 * If we enable the backlight right away following a panel power
2105 	 * on, we may see slight flicker as the panel syncs with the eDP
2106 	 * link.  So delay a bit to make sure the image is solid before
2107 	 * allowing it to appear.
2108 	 */
2109 	wait_backlight_on(intel_dp);
2110 
2111 	pps_lock(intel_dp);
2112 
2113 	pp = ironlake_get_pp_control(intel_dp);
2114 	pp |= EDP_BLC_ENABLE;
2115 
2116 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2117 
2118 	I915_WRITE(pp_ctrl_reg, pp);
2119 	POSTING_READ(pp_ctrl_reg);
2120 
2121 	pps_unlock(intel_dp);
2122 }
2123 
2124 /* Enable backlight PWM and backlight PP control. */
2125 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2126 {
2127 	if (!is_edp(intel_dp))
2128 		return;
2129 
2130 	DRM_DEBUG_KMS("\n");
2131 
2132 	intel_panel_enable_backlight(intel_dp->attached_connector);
2133 	_intel_edp_backlight_on(intel_dp);
2134 }
2135 
2136 /* Disable backlight in the panel power control. */
2137 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2138 {
2139 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2140 	struct drm_i915_private *dev_priv = dev->dev_private;
2141 	u32 pp;
2142 	u32 pp_ctrl_reg;
2143 
2144 	if (!is_edp(intel_dp))
2145 		return;
2146 
2147 	pps_lock(intel_dp);
2148 
2149 	pp = ironlake_get_pp_control(intel_dp);
2150 	pp &= ~EDP_BLC_ENABLE;
2151 
2152 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2153 
2154 	I915_WRITE(pp_ctrl_reg, pp);
2155 	POSTING_READ(pp_ctrl_reg);
2156 
2157 	pps_unlock(intel_dp);
2158 
2159 	intel_dp->last_backlight_off = jiffies;
2160 	edp_wait_backlight_off(intel_dp);
2161 }
2162 
2163 /* Disable backlight PP control and backlight PWM. */
2164 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2165 {
2166 	if (!is_edp(intel_dp))
2167 		return;
2168 
2169 	DRM_DEBUG_KMS("\n");
2170 
2171 	_intel_edp_backlight_off(intel_dp);
2172 	intel_panel_disable_backlight(intel_dp->attached_connector);
2173 }
2174 
2175 /*
2176  * Hook for controlling the panel power control backlight through the bl_power
2177  * sysfs attribute. Take care to handle multiple calls.
2178  */
2179 static void intel_edp_backlight_power(struct intel_connector *connector,
2180 				      bool enable)
2181 {
2182 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2183 	bool is_enabled;
2184 
2185 	pps_lock(intel_dp);
2186 	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2187 	pps_unlock(intel_dp);
2188 
2189 	if (is_enabled == enable)
2190 		return;
2191 
2192 	DRM_DEBUG_KMS("panel power control backlight %s\n",
2193 		      enable ? "enable" : "disable");
2194 
2195 	if (enable)
2196 		_intel_edp_backlight_on(intel_dp);
2197 	else
2198 		_intel_edp_backlight_off(intel_dp);
2199 }
2200 
2201 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2202 {
2203 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2204 	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2205 	struct drm_device *dev = crtc->dev;
2206 	struct drm_i915_private *dev_priv = dev->dev_private;
2207 	u32 dpa_ctl;
2208 
2209 	assert_pipe_disabled(dev_priv,
2210 			     to_intel_crtc(crtc)->pipe);
2211 
2212 	DRM_DEBUG_KMS("\n");
2213 	dpa_ctl = I915_READ(DP_A);
2214 	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2215 	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2216 
2217 	/* We don't adjust intel_dp->DP while tearing down the link, to
2218 	 * facilitate link retraining (e.g. after hotplug). Hence clear all
2219 	 * enable bits here to ensure that we don't enable too much. */
2220 	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2221 	intel_dp->DP |= DP_PLL_ENABLE;
2222 	I915_WRITE(DP_A, intel_dp->DP);
2223 	POSTING_READ(DP_A);
2224 	udelay(200);
2225 }
2226 
2227 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2228 {
2229 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2230 	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2231 	struct drm_device *dev = crtc->dev;
2232 	struct drm_i915_private *dev_priv = dev->dev_private;
2233 	u32 dpa_ctl;
2234 
2235 	assert_pipe_disabled(dev_priv,
2236 			     to_intel_crtc(crtc)->pipe);
2237 
2238 	dpa_ctl = I915_READ(DP_A);
2239 	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2240 	     "dp pll off, should be on\n");
2241 	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2242 
2243 	/* We can't rely on the value tracked for the DP register in
2244 	 * intel_dp->DP because link_down must not change that (otherwise link
2245 	 * re-training will fail). */
2246 	dpa_ctl &= ~DP_PLL_ENABLE;
2247 	I915_WRITE(DP_A, dpa_ctl);
2248 	POSTING_READ(DP_A);
2249 	udelay(200);
2250 }
2251 
2252 /* If the sink supports it, try to set the power state appropriately */
2253 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2254 {
2255 	int ret, i;
2256 
2257 	/* Should have a valid DPCD by this point */
2258 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2259 		return;
2260 
2261 	if (mode != DRM_MODE_DPMS_ON) {
2262 		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2263 					 DP_SET_POWER_D3);
2264 	} else {
2265 		/*
2266 		 * When turning on, retry the write a few times, sleeping 1ms
2267 		 * between attempts, to give the sink time to wake up.
2268 		 */
2269 		for (i = 0; i < 3; i++) {
2270 			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2271 						 DP_SET_POWER_D0);
2272 			if (ret == 1)
2273 				break;
2274 			msleep(1);
2275 		}
2276 	}
2277 
2278 	if (ret != 1)
2279 		DRM_DEBUG_KMS("failed to %s sink power state\n",
2280 			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2281 }
2282 
2283 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2284 				  enum i915_pipe *pipe)
2285 {
2286 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2287 	enum port port = dp_to_dig_port(intel_dp)->port;
2288 	struct drm_device *dev = encoder->base.dev;
2289 	struct drm_i915_private *dev_priv = dev->dev_private;
2290 	enum intel_display_power_domain power_domain;
2291 	u32 tmp;
2292 
2293 	power_domain = intel_display_port_power_domain(encoder);
2294 	if (!intel_display_power_is_enabled(dev_priv, power_domain))
2295 		return false;
2296 
2297 	tmp = I915_READ(intel_dp->output_reg);
2298 
2299 	if (!(tmp & DP_PORT_EN))
2300 		return false;
2301 
2302 	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
2303 		*pipe = PORT_TO_PIPE_CPT(tmp);
2304 	} else if (IS_CHERRYVIEW(dev)) {
2305 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2306 	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
2307 		*pipe = PORT_TO_PIPE(tmp);
2308 	} else {
2309 		u32 trans_sel;
2310 		u32 trans_dp;
2311 		int i;
2312 
2313 		switch (intel_dp->output_reg) {
2314 		case PCH_DP_B:
2315 			trans_sel = TRANS_DP_PORT_SEL_B;
2316 			break;
2317 		case PCH_DP_C:
2318 			trans_sel = TRANS_DP_PORT_SEL_C;
2319 			break;
2320 		case PCH_DP_D:
2321 			trans_sel = TRANS_DP_PORT_SEL_D;
2322 			break;
2323 		default:
2324 			return true;
2325 		}
2326 
2327 		for_each_pipe(dev_priv, i) {
2328 			trans_dp = I915_READ(TRANS_DP_CTL(i));
2329 			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2330 				*pipe = i;
2331 				return true;
2332 			}
2333 		}
2334 
2335 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2336 			      intel_dp->output_reg);
2337 	}
2338 
2339 	return true;
2340 }
2341 
2342 static void intel_dp_get_config(struct intel_encoder *encoder,
2343 				struct intel_crtc_state *pipe_config)
2344 {
2345 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2346 	u32 tmp, flags = 0;
2347 	struct drm_device *dev = encoder->base.dev;
2348 	struct drm_i915_private *dev_priv = dev->dev_private;
2349 	enum port port = dp_to_dig_port(intel_dp)->port;
2350 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2351 	int dotclock;
2352 
2353 	tmp = I915_READ(intel_dp->output_reg);
2354 
2355 	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2356 
2357 	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
2358 		if (tmp & DP_SYNC_HS_HIGH)
2359 			flags |= DRM_MODE_FLAG_PHSYNC;
2360 		else
2361 			flags |= DRM_MODE_FLAG_NHSYNC;
2362 
2363 		if (tmp & DP_SYNC_VS_HIGH)
2364 			flags |= DRM_MODE_FLAG_PVSYNC;
2365 		else
2366 			flags |= DRM_MODE_FLAG_NVSYNC;
2367 	} else {
2368 		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2369 		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2370 			flags |= DRM_MODE_FLAG_PHSYNC;
2371 		else
2372 			flags |= DRM_MODE_FLAG_NHSYNC;
2373 
2374 		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2375 			flags |= DRM_MODE_FLAG_PVSYNC;
2376 		else
2377 			flags |= DRM_MODE_FLAG_NVSYNC;
2378 	}
2379 
2380 	pipe_config->base.adjusted_mode.flags |= flags;
2381 
2382 	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2383 	    tmp & DP_COLOR_RANGE_16_235)
2384 		pipe_config->limited_color_range = true;
2385 
2386 	pipe_config->has_dp_encoder = true;
2387 
2388 	intel_dp_get_m_n(crtc, pipe_config);
2389 
2390 	if (port == PORT_A) {
2391 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2392 			pipe_config->port_clock = 162000;
2393 		else
2394 			pipe_config->port_clock = 270000;
2395 	}
2396 
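	/*
	 * The dot clock is recovered from the link M/N values read back by
	 * intel_dp_get_m_n(); conceptually (a sketch of what
	 * intel_dotclock_calculate() computes):
	 *
	 *	dotclock = port_clock * link_m / link_n
	 */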
2397 	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2398 					    &pipe_config->dp_m_n);
2399 
2400 	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2401 		ironlake_check_encoder_dotclock(pipe_config, dotclock);
2402 
2403 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2404 
2405 	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2406 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2407 		/*
2408 		 * This is a big fat ugly hack.
2409 		 *
2410 		 * Some machines in UEFI boot mode provide us a VBT that has 18
2411 		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2412 		 * unknown we fail to light up. Yet the same BIOS boots up with
2413 		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2414 		 * max, not what it tells us to use.
2415 		 *
2416 		 * Note: This will still be broken if the eDP panel is not lit
2417 		 * up by the BIOS, and thus we can't get the mode at module
2418 		 * load.
2419 		 */
2420 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2421 			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2422 		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2423 	}
2424 }
2425 
2426 static void intel_disable_dp(struct intel_encoder *encoder)
2427 {
2428 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2429 	struct drm_device *dev = encoder->base.dev;
2430 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2431 
2432 	if (crtc->config->has_audio)
2433 		intel_audio_codec_disable(encoder);
2434 
2435 	if (HAS_PSR(dev) && !HAS_DDI(dev))
2436 		intel_psr_disable(intel_dp);
2437 
2438 	/* Make sure the panel is off before trying to change the mode. But also
2439 	 * ensure that we have vdd while we switch off the panel. */
2440 	intel_edp_panel_vdd_on(intel_dp);
2441 	intel_edp_backlight_off(intel_dp);
2442 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2443 	intel_edp_panel_off(intel_dp);
2444 
2445 	/* disable the port before the pipe on g4x */
2446 	if (INTEL_INFO(dev)->gen < 5)
2447 		intel_dp_link_down(intel_dp);
2448 }
2449 
2450 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2451 {
2452 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2453 	enum port port = dp_to_dig_port(intel_dp)->port;
2454 
2455 	intel_dp_link_down(intel_dp);
2456 	if (port == PORT_A)
2457 		ironlake_edp_pll_off(intel_dp);
2458 }
2459 
2460 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2461 {
2462 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2463 
2464 	intel_dp_link_down(intel_dp);
2465 }
2466 
2467 static void chv_post_disable_dp(struct intel_encoder *encoder)
2468 {
2469 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2470 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2471 	struct drm_device *dev = encoder->base.dev;
2472 	struct drm_i915_private *dev_priv = dev->dev_private;
2473 	struct intel_crtc *intel_crtc =
2474 		to_intel_crtc(encoder->base.crtc);
2475 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2476 	enum i915_pipe pipe = intel_crtc->pipe;
2477 	u32 val;
2478 
2479 	intel_dp_link_down(intel_dp);
2480 
2481 	mutex_lock(&dev_priv->dpio_lock);
2482 
2483 	/* Propagate soft reset to data lane reset */
2484 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2485 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2486 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2487 
2488 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2489 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2490 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2491 
2492 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2493 	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2494 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2495 
2496 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2497 	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2498 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2499 
2500 	mutex_unlock(&dev_priv->dpio_lock);
2501 }
2502 
2503 static void
2504 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2505 			 uint32_t *DP,
2506 			 uint8_t dp_train_pat)
2507 {
2508 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2509 	struct drm_device *dev = intel_dig_port->base.base.dev;
2510 	struct drm_i915_private *dev_priv = dev->dev_private;
2511 	enum port port = intel_dig_port->port;
2512 
2513 	if (HAS_DDI(dev)) {
2514 		uint32_t temp = I915_READ(DP_TP_CTL(port));
2515 
2516 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2517 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2518 		else
2519 			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2520 
2521 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2522 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2523 		case DP_TRAINING_PATTERN_DISABLE:
2524 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2525 
2526 			break;
2527 		case DP_TRAINING_PATTERN_1:
2528 			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2529 			break;
2530 		case DP_TRAINING_PATTERN_2:
2531 			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2532 			break;
2533 		case DP_TRAINING_PATTERN_3:
2534 			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2535 			break;
2536 		}
2537 		I915_WRITE(DP_TP_CTL(port), temp);
2538 
2539 	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2540 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2541 
2542 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2543 		case DP_TRAINING_PATTERN_DISABLE:
2544 			*DP |= DP_LINK_TRAIN_OFF_CPT;
2545 			break;
2546 		case DP_TRAINING_PATTERN_1:
2547 			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2548 			break;
2549 		case DP_TRAINING_PATTERN_2:
2550 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2551 			break;
2552 		case DP_TRAINING_PATTERN_3:
2553 			DRM_ERROR("DP training pattern 3 not supported\n");
2554 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2555 			break;
2556 		}
2557 
2558 	} else {
2559 		if (IS_CHERRYVIEW(dev))
2560 			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
2561 		else
2562 			*DP &= ~DP_LINK_TRAIN_MASK;
2563 
2564 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2565 		case DP_TRAINING_PATTERN_DISABLE:
2566 			*DP |= DP_LINK_TRAIN_OFF;
2567 			break;
2568 		case DP_TRAINING_PATTERN_1:
2569 			*DP |= DP_LINK_TRAIN_PAT_1;
2570 			break;
2571 		case DP_TRAINING_PATTERN_2:
2572 			*DP |= DP_LINK_TRAIN_PAT_2;
2573 			break;
2574 		case DP_TRAINING_PATTERN_3:
2575 			if (IS_CHERRYVIEW(dev)) {
2576 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
2577 			} else {
2578 				DRM_ERROR("DP training pattern 3 not supported\n");
2579 				*DP |= DP_LINK_TRAIN_PAT_2;
2580 			}
2581 			break;
2582 		}
2583 	}
2584 }
2585 
2586 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2587 {
2588 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2589 	struct drm_i915_private *dev_priv = dev->dev_private;
2590 
2591 	/* enable with pattern 1 (as per spec) */
2592 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2593 				 DP_TRAINING_PATTERN_1);
2594 
2595 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2596 	POSTING_READ(intel_dp->output_reg);
2597 
2598 	/*
2599 	 * Magic for VLV/CHV. We _must_ first set up the register
2600 	 * without actually enabling the port, and then do another
2601 	 * write to enable the port. Otherwise link training will
2602 	 * fail when the power sequencer is freshly used for this port.
2603 	 */
2604 	intel_dp->DP |= DP_PORT_EN;
2605 
2606 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2607 	POSTING_READ(intel_dp->output_reg);
2608 }
2609 
2610 static void intel_enable_dp(struct intel_encoder *encoder)
2611 {
2612 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2613 	struct drm_device *dev = encoder->base.dev;
2614 	struct drm_i915_private *dev_priv = dev->dev_private;
2615 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2616 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2617 
2618 	if (WARN_ON(dp_reg & DP_PORT_EN))
2619 		return;
2620 
2621 	pps_lock(intel_dp);
2622 
2623 	if (IS_VALLEYVIEW(dev))
2624 		vlv_init_panel_power_sequencer(intel_dp);
2625 
2626 	intel_dp_enable_port(intel_dp);
2627 
2628 	edp_panel_vdd_on(intel_dp);
2629 	edp_panel_on(intel_dp);
2630 	edp_panel_vdd_off(intel_dp, true);
2631 
2632 	pps_unlock(intel_dp);
2633 
2634 	if (IS_VALLEYVIEW(dev))
2635 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2636 
2637 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2638 	intel_dp_start_link_train(intel_dp);
2639 	intel_dp_complete_link_train(intel_dp);
2640 	intel_dp_stop_link_train(intel_dp);
2641 
2642 	if (crtc->config->has_audio) {
2643 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2644 				 pipe_name(crtc->pipe));
2645 		intel_audio_codec_enable(encoder);
2646 	}
2647 }
2648 
2649 static void g4x_enable_dp(struct intel_encoder *encoder)
2650 {
2651 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2652 
2653 	intel_enable_dp(encoder);
2654 	intel_edp_backlight_on(intel_dp);
2655 }
2656 
2657 static void vlv_enable_dp(struct intel_encoder *encoder)
2658 {
2659 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2660 
2661 	intel_edp_backlight_on(intel_dp);
2662 	intel_psr_enable(intel_dp);
2663 }
2664 
2665 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2666 {
2667 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2668 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2669 
2670 	intel_dp_prepare(encoder);
2671 
2672 	/* Only ilk+ has port A */
2673 	if (dport->port == PORT_A) {
2674 		ironlake_set_pll_cpu_edp(intel_dp);
2675 		ironlake_edp_pll_on(intel_dp);
2676 	}
2677 }
2678 
2679 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2680 {
2681 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2682 	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2683 	enum i915_pipe pipe = intel_dp->pps_pipe;
2684 	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2685 
2686 	edp_panel_vdd_off_sync(intel_dp);
2687 
2688 	/*
2689 	 * VLV seems to get confused when multiple power sequencers
2690 	 * have the same port selected (even if only one has power/vdd
2691 	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2692 	 * CHV, on the other hand, doesn't seem to mind having the same port
2693 	 * selected in multiple power sequencers, but let's always clear the
2694 	 * port select when logically disconnecting a power sequencer
2695 	 * from a port.
2696 	 */
2697 	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2698 		      pipe_name(pipe), port_name(intel_dig_port->port));
2699 	I915_WRITE(pp_on_reg, 0);
2700 	POSTING_READ(pp_on_reg);
2701 
2702 	intel_dp->pps_pipe = INVALID_PIPE;
2703 }
2704 
2705 static void vlv_steal_power_sequencer(struct drm_device *dev,
2706 				      enum i915_pipe pipe)
2707 {
2708 	struct drm_i915_private *dev_priv = dev->dev_private;
2709 	struct intel_encoder *encoder;
2710 
2711 	lockdep_assert_held(&dev_priv->pps_mutex);
2712 
2713 	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2714 		return;
2715 
2716 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2717 			    base.head) {
2718 		struct intel_dp *intel_dp;
2719 		enum port port;
2720 
2721 		if (encoder->type != INTEL_OUTPUT_EDP)
2722 			continue;
2723 
2724 		intel_dp = enc_to_intel_dp(&encoder->base);
2725 		port = dp_to_dig_port(intel_dp)->port;
2726 
2727 		if (intel_dp->pps_pipe != pipe)
2728 			continue;
2729 
2730 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2731 			      pipe_name(pipe), port_name(port));
2732 
2733 		WARN(encoder->connectors_active,
2734 		     "stealing pipe %c power sequencer from active eDP port %c\n",
2735 		     pipe_name(pipe), port_name(port));
2736 
2737 		/* make sure vdd is off before we steal it */
2738 		vlv_detach_power_sequencer(intel_dp);
2739 	}
2740 }
2741 
2742 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2743 {
2744 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2745 	struct intel_encoder *encoder = &intel_dig_port->base;
2746 	struct drm_device *dev = encoder->base.dev;
2747 	struct drm_i915_private *dev_priv = dev->dev_private;
2748 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2749 
2750 	lockdep_assert_held(&dev_priv->pps_mutex);
2751 
2752 	if (!is_edp(intel_dp))
2753 		return;
2754 
2755 	if (intel_dp->pps_pipe == crtc->pipe)
2756 		return;
2757 
2758 	/*
2759 	 * If another power sequencer was being used on this
2760 	 * port previously make sure to turn off vdd there while
2761 	 * we still have control of it.
2762 	 */
2763 	if (intel_dp->pps_pipe != INVALID_PIPE)
2764 		vlv_detach_power_sequencer(intel_dp);
2765 
2766 	/*
2767 	 * We may be stealing the power
2768 	 * sequencer from another port.
2769 	 */
2770 	vlv_steal_power_sequencer(dev, crtc->pipe);
2771 
2772 	/* now it's all ours */
2773 	intel_dp->pps_pipe = crtc->pipe;
2774 
2775 	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2776 		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2777 
2778 	/* init power sequencer on this pipe and port */
2779 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2780 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2781 }
2782 
2783 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2784 {
2785 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2786 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2787 	struct drm_device *dev = encoder->base.dev;
2788 	struct drm_i915_private *dev_priv = dev->dev_private;
2789 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2790 	enum dpio_channel port = vlv_dport_to_channel(dport);
2791 	int pipe = intel_crtc->pipe;
2792 	u32 val;
2793 
2794 	mutex_lock(&dev_priv->dpio_lock);
2795 
2796 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2797 	val = 0;
2798 	if (pipe)
2799 		val |= (1<<21);
2800 	else
2801 		val &= ~(1<<21);
2802 	val |= 0x001000c4;
2803 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2804 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2805 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2806 
2807 	mutex_unlock(&dev_priv->dpio_lock);
2808 
2809 	intel_enable_dp(encoder);
2810 }
2811 
2812 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2813 {
2814 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2815 	struct drm_device *dev = encoder->base.dev;
2816 	struct drm_i915_private *dev_priv = dev->dev_private;
2817 	struct intel_crtc *intel_crtc =
2818 		to_intel_crtc(encoder->base.crtc);
2819 	enum dpio_channel port = vlv_dport_to_channel(dport);
2820 	int pipe = intel_crtc->pipe;
2821 
2822 	intel_dp_prepare(encoder);
2823 
2824 	/* Program Tx lane resets to default */
2825 	mutex_lock(&dev_priv->dpio_lock);
2826 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2827 			 DPIO_PCS_TX_LANE2_RESET |
2828 			 DPIO_PCS_TX_LANE1_RESET);
2829 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2830 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2831 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2832 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2833 				 DPIO_PCS_CLK_SOFT_RESET);
2834 
2835 	/* Fix up inter-pair skew failure */
2836 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2837 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2838 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2839 	mutex_unlock(&dev_priv->dpio_lock);
2840 }
2841 
2842 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2843 {
2844 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2845 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2846 	struct drm_device *dev = encoder->base.dev;
2847 	struct drm_i915_private *dev_priv = dev->dev_private;
2848 	struct intel_crtc *intel_crtc =
2849 		to_intel_crtc(encoder->base.crtc);
2850 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2851 	int pipe = intel_crtc->pipe;
2852 	int data, i;
2853 	u32 val;
2854 
2855 	mutex_lock(&dev_priv->dpio_lock);
2856 
2857 	/* allow hardware to manage TX FIFO reset source */
2858 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2859 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2860 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2861 
2862 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2863 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2864 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2865 
2866 	/* Deassert soft data lane reset */
2867 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2868 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2869 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2870 
2871 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2872 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2873 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2874 
2875 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2876 	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2877 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2878 
2879 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2880 	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2881 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2882 
2883 	/* Program Tx lane latency optimal setting */
2884 	for (i = 0; i < 4; i++) {
2885 		/* Set the upar bit */
2886 		data = (i == 1) ? 0x0 : 0x1;
2887 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2888 				data << DPIO_UPAR_SHIFT);
2889 	}
2890 
2891 	/* Data lane stagger programming */
2892 	/* FIXME: Fix up value only after power analysis */
2893 
2894 	mutex_unlock(&dev_priv->dpio_lock);
2895 
2896 	intel_enable_dp(encoder);
2897 }
2898 
2899 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2900 {
2901 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2902 	struct drm_device *dev = encoder->base.dev;
2903 	struct drm_i915_private *dev_priv = dev->dev_private;
2904 	struct intel_crtc *intel_crtc =
2905 		to_intel_crtc(encoder->base.crtc);
2906 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2907 	enum i915_pipe pipe = intel_crtc->pipe;
2908 	u32 val;
2909 
2910 	intel_dp_prepare(encoder);
2911 
2912 	mutex_lock(&dev_priv->dpio_lock);
2913 
2914 	/* program left/right clock distribution */
2915 	if (pipe != PIPE_B) {
2916 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2917 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2918 		if (ch == DPIO_CH0)
2919 			val |= CHV_BUFLEFTENA1_FORCE;
2920 		if (ch == DPIO_CH1)
2921 			val |= CHV_BUFRIGHTENA1_FORCE;
2922 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2923 	} else {
2924 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2925 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2926 		if (ch == DPIO_CH0)
2927 			val |= CHV_BUFLEFTENA2_FORCE;
2928 		if (ch == DPIO_CH1)
2929 			val |= CHV_BUFRIGHTENA2_FORCE;
2930 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2931 	}
2932 
2933 	/* program clock channel usage */
2934 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2935 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2936 	if (pipe != PIPE_B)
2937 		val &= ~CHV_PCS_USEDCLKCHANNEL;
2938 	else
2939 		val |= CHV_PCS_USEDCLKCHANNEL;
2940 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2941 
2942 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2943 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2944 	if (pipe != PIPE_B)
2945 		val &= ~CHV_PCS_USEDCLKCHANNEL;
2946 	else
2947 		val |= CHV_PCS_USEDCLKCHANNEL;
2948 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2949 
2950 	/*
2951 	 * This is a bit weird since generally CL
2952 	 * matches the pipe, but here we need to
2953 	 * pick the CL based on the port.
2954 	 */
2955 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2956 	if (pipe != PIPE_B)
2957 		val &= ~CHV_CMN_USEDCLKCHANNEL;
2958 	else
2959 		val |= CHV_CMN_USEDCLKCHANNEL;
2960 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2961 
2962 	mutex_unlock(&dev_priv->dpio_lock);
2963 }
2964 
2965 /*
2966  * Native read with retry for link status and receiver capability reads for
2967  * cases where the sink may still be asleep.
2968  *
2969  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2970  * supposed to retry 3 times per the spec.
2971  */
2972 static ssize_t
2973 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2974 			void *buffer, size_t size)
2975 {
2976 	ssize_t ret;
2977 	int i;
2978 
2979 	/*
2980 	 * Sometimes we just get the same incorrect byte repeated
2981 	 * over the entire buffer. Doing just one throw-away read
2982 	 * initially seems to "solve" it.
2983 	 */
2984 	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2985 
2986 	for (i = 0; i < 3; i++) {
2987 		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2988 		if (ret == size)
2989 			return ret;
2990 		msleep(1);
2991 	}
2992 
2993 	return ret;
2994 }
2995 
2996 /*
2997  * Fetch AUX CH registers 0x202 - 0x207 which contain
2998  * link status information
2999  */
3000 static bool
3001 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3002 {
3003 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3004 				       DP_LANE0_1_STATUS,
3005 				       link_status,
3006 				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3007 }
3008 
3009 /* These are source-specific values. */
3010 static uint8_t
3011 intel_dp_voltage_max(struct intel_dp *intel_dp)
3012 {
3013 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3014 	struct drm_i915_private *dev_priv = dev->dev_private;
3015 	enum port port = dp_to_dig_port(intel_dp)->port;
3016 
3017 	if (INTEL_INFO(dev)->gen >= 9) {
3018 		if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
3019 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3020 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3021 	} else if (IS_VALLEYVIEW(dev))
3022 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3023 	else if (IS_GEN7(dev) && port == PORT_A)
3024 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3025 	else if (HAS_PCH_CPT(dev) && port != PORT_A)
3026 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3027 	else
3028 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3029 }
3030 
3031 static uint8_t
3032 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3033 {
3034 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3035 	enum port port = dp_to_dig_port(intel_dp)->port;
3036 
3037 	if (INTEL_INFO(dev)->gen >= 9) {
3038 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3039 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3040 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3041 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3042 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3043 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3044 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3045 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3046 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3047 		default:
3048 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3049 		}
3050 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3051 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3052 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3053 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3054 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3055 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3056 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3057 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3058 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3059 		default:
3060 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3061 		}
3062 	} else if (IS_VALLEYVIEW(dev)) {
3063 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3064 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3065 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3066 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3067 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3068 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3069 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3070 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3071 		default:
3072 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3073 		}
3074 	} else if (IS_GEN7(dev) && port == PORT_A) {
3075 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3076 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3077 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3078 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3079 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3080 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3081 		default:
3082 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3083 		}
3084 	} else {
3085 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3086 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3087 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3088 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3089 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3090 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3091 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3092 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3093 		default:
3094 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3095 		}
3096 	}
3097 }
3098 
3099 static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
3100 {
3101 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3102 	struct drm_i915_private *dev_priv = dev->dev_private;
3103 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3104 	struct intel_crtc *intel_crtc =
3105 		to_intel_crtc(dport->base.base.crtc);
3106 	unsigned long demph_reg_value, preemph_reg_value,
3107 		uniqtranscale_reg_value;
3108 	uint8_t train_set = intel_dp->train_set[0];
3109 	enum dpio_channel port = vlv_dport_to_channel(dport);
3110 	int pipe = intel_crtc->pipe;
3111 
3112 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3113 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3114 		preemph_reg_value = 0x0004000;
3115 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3116 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3117 			demph_reg_value = 0x2B405555;
3118 			uniqtranscale_reg_value = 0x552AB83A;
3119 			break;
3120 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3121 			demph_reg_value = 0x2B404040;
3122 			uniqtranscale_reg_value = 0x5548B83A;
3123 			break;
3124 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3125 			demph_reg_value = 0x2B245555;
3126 			uniqtranscale_reg_value = 0x5560B83A;
3127 			break;
3128 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3129 			demph_reg_value = 0x2B405555;
3130 			uniqtranscale_reg_value = 0x5598DA3A;
3131 			break;
3132 		default:
3133 			return 0;
3134 		}
3135 		break;
3136 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3137 		preemph_reg_value = 0x0002000;
3138 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3139 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3140 			demph_reg_value = 0x2B404040;
3141 			uniqtranscale_reg_value = 0x5552B83A;
3142 			break;
3143 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3144 			demph_reg_value = 0x2B404848;
3145 			uniqtranscale_reg_value = 0x5580B83A;
3146 			break;
3147 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3148 			demph_reg_value = 0x2B404040;
3149 			uniqtranscale_reg_value = 0x55ADDA3A;
3150 			break;
3151 		default:
3152 			return 0;
3153 		}
3154 		break;
3155 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3156 		preemph_reg_value = 0x0000000;
3157 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3158 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3159 			demph_reg_value = 0x2B305555;
3160 			uniqtranscale_reg_value = 0x5570B83A;
3161 			break;
3162 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3163 			demph_reg_value = 0x2B2B4040;
3164 			uniqtranscale_reg_value = 0x55ADDA3A;
3165 			break;
3166 		default:
3167 			return 0;
3168 		}
3169 		break;
3170 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3171 		preemph_reg_value = 0x0006000;
3172 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3173 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3174 			demph_reg_value = 0x1B405555;
3175 			uniqtranscale_reg_value = 0x55ADDA3A;
3176 			break;
3177 		default:
3178 			return 0;
3179 		}
3180 		break;
3181 	default:
3182 		return 0;
3183 	}
3184 
3185 	mutex_lock(&dev_priv->dpio_lock);
3186 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3187 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3188 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3189 			 uniqtranscale_reg_value);
3190 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3191 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3192 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3193 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3194 	mutex_unlock(&dev_priv->dpio_lock);
3195 
3196 	return 0;
3197 }
3198 
3199 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3200 {
3201 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3202 	struct drm_i915_private *dev_priv = dev->dev_private;
3203 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3204 	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3205 	u32 deemph_reg_value, margin_reg_value, val;
3206 	uint8_t train_set = intel_dp->train_set[0];
3207 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3208 	enum i915_pipe pipe = intel_crtc->pipe;
3209 	int i;
3210 
3211 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3212 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3213 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3214 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3215 			deemph_reg_value = 128;
3216 			margin_reg_value = 52;
3217 			break;
3218 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3219 			deemph_reg_value = 128;
3220 			margin_reg_value = 77;
3221 			break;
3222 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3223 			deemph_reg_value = 128;
3224 			margin_reg_value = 102;
3225 			break;
3226 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3227 			deemph_reg_value = 128;
3228 			margin_reg_value = 154;
3229 			/* FIXME extra to set for 1200 */
3230 			break;
3231 		default:
3232 			return 0;
3233 		}
3234 		break;
3235 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3236 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3237 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3238 			deemph_reg_value = 85;
3239 			margin_reg_value = 78;
3240 			break;
3241 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3242 			deemph_reg_value = 85;
3243 			margin_reg_value = 116;
3244 			break;
3245 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3246 			deemph_reg_value = 85;
3247 			margin_reg_value = 154;
3248 			break;
3249 		default:
3250 			return 0;
3251 		}
3252 		break;
3253 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3254 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3255 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3256 			deemph_reg_value = 64;
3257 			margin_reg_value = 104;
3258 			break;
3259 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3260 			deemph_reg_value = 64;
3261 			margin_reg_value = 154;
3262 			break;
3263 		default:
3264 			return 0;
3265 		}
3266 		break;
3267 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3268 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3269 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3270 			deemph_reg_value = 43;
3271 			margin_reg_value = 154;
3272 			break;
3273 		default:
3274 			return 0;
3275 		}
3276 		break;
3277 	default:
3278 		return 0;
3279 	}
3280 
3281 	mutex_lock(&dev_priv->dpio_lock);
3282 
3283 	/* Clear calc init */
3284 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3285 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3286 	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3287 	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3288 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3289 
3290 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3291 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3292 	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3293 	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3294 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3295 
3296 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3297 	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3298 	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3299 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3300 
3301 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3302 	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3303 	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3304 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3305 
3306 	/* Program swing deemph */
3307 	for (i = 0; i < 4; i++) {
3308 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3309 		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3310 		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3311 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3312 	}
3313 
3314 	/* Program swing margin */
3315 	for (i = 0; i < 4; i++) {
3316 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3317 		val &= ~DPIO_SWING_MARGIN000_MASK;
3318 		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3319 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3320 	}
3321 
3322 	/* Disable unique transition scale */
3323 	for (i = 0; i < 4; i++) {
3324 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3325 		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3326 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3327 	}
3328 
3329 	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
3330 			== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
3331 		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
3332 			== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
3333 
3334 		/*
3335 		 * The document said it needs to set bit 27 for ch0 and bit 26
3336 		 * for ch1. Might be a typo in the doc.
3337 		 * For now, for this unique transition scale selection, set bit
3338 		 * 27 for ch0 and ch1.
3339 		 */
3340 		for (i = 0; i < 4; i++) {
3341 			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3342 			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3343 			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3344 		}
3345 
3346 		for (i = 0; i < 4; i++) {
3347 			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3348 			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3349 			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3350 			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3351 		}
3352 	}
3353 
3354 	/* Start swing calculation */
3355 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3356 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3357 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3358 
3359 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3360 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3361 	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3362 
3363 	/* LRC Bypass */
3364 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3365 	val |= DPIO_LRC_BYPASS;
3366 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3367 
3368 	mutex_unlock(&dev_priv->dpio_lock);
3369 
3370 	return 0;
3371 }
3372 
3373 static void
3374 intel_get_adjust_train(struct intel_dp *intel_dp,
3375 		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
3376 {
3377 	uint8_t v = 0;
3378 	uint8_t p = 0;
3379 	int lane;
3380 	uint8_t voltage_max;
3381 	uint8_t preemph_max;
3382 
3383 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
3384 		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3385 		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3386 
3387 		if (this_v > v)
3388 			v = this_v;
3389 		if (this_p > p)
3390 			p = this_p;
3391 	}
3392 
3393 	voltage_max = intel_dp_voltage_max(intel_dp);
3394 	if (v >= voltage_max)
3395 		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3396 
3397 	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3398 	if (p >= preemph_max)
3399 		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3400 
3401 	for (lane = 0; lane < 4; lane++)
3402 		intel_dp->train_set[lane] = v | p;
3403 }
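/*
 * Worked example (illustrative): if the sink's adjust requests for the
 * active lanes are swing levels {0, 2, 1} and pre-emphasis levels
 * {1, 0, 1}, the loop above picks v = 2 and p = 1. If the source caps
 * the voltage at level 2, v becomes 2 | DP_TRAIN_MAX_SWING_REACHED
 * (and p is clamped the same way), and every entry of train_set is
 * then set to v | p.
 */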
3404 
3405 static uint32_t
3406 intel_gen4_signal_levels(uint8_t train_set)
3407 {
3408 	uint32_t	signal_levels = 0;
3409 
3410 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3411 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3412 	default:
3413 		signal_levels |= DP_VOLTAGE_0_4;
3414 		break;
3415 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3416 		signal_levels |= DP_VOLTAGE_0_6;
3417 		break;
3418 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3419 		signal_levels |= DP_VOLTAGE_0_8;
3420 		break;
3421 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3422 		signal_levels |= DP_VOLTAGE_1_2;
3423 		break;
3424 	}
3425 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3426 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3427 	default:
3428 		signal_levels |= DP_PRE_EMPHASIS_0;
3429 		break;
3430 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3431 		signal_levels |= DP_PRE_EMPHASIS_3_5;
3432 		break;
3433 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3434 		signal_levels |= DP_PRE_EMPHASIS_6;
3435 		break;
3436 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3437 		signal_levels |= DP_PRE_EMPHASIS_9_5;
3438 		break;
3439 	}
3440 	return signal_levels;
3441 }
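/*
 * Example (illustrative): train_set = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 |
 * DP_TRAIN_PRE_EMPH_LEVEL_2 maps to DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_6,
 * i.e. a 0.6V swing with 6 dB of pre-emphasis on the gen4 DP port.
 */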
3442 
3443 /* Gen6's DP voltage swing and pre-emphasis control */
3444 static uint32_t
3445 intel_gen6_edp_signal_levels(uint8_t train_set)
3446 {
3447 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3448 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3449 	switch (signal_levels) {
3450 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3451 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3452 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3453 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3454 		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3455 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3456 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3457 		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3458 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3459 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3460 		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3461 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3462 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3463 		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3464 	default:
3465 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3466 			      "0x%x\n", signal_levels);
3467 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3468 	}
3469 }
3470 
3471 /* Gen7's DP voltage swing and pre-emphasis control */
3472 static uint32_t
3473 intel_gen7_edp_signal_levels(uint8_t train_set)
3474 {
3475 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3476 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3477 	switch (signal_levels) {
3478 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3479 		return EDP_LINK_TRAIN_400MV_0DB_IVB;
3480 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3481 		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3482 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3483 		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3484 
3485 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3486 		return EDP_LINK_TRAIN_600MV_0DB_IVB;
3487 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3488 		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3489 
3490 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3491 		return EDP_LINK_TRAIN_800MV_0DB_IVB;
3492 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3493 		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3494 
3495 	default:
3496 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3497 			      "0x%x\n", signal_levels);
3498 		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3499 	}
3500 }
3501 
3502 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3503 static uint32_t
3504 intel_hsw_signal_levels(uint8_t train_set)
3505 {
3506 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3507 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3508 	switch (signal_levels) {
3509 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3510 		return DDI_BUF_TRANS_SELECT(0);
3511 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3512 		return DDI_BUF_TRANS_SELECT(1);
3513 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3514 		return DDI_BUF_TRANS_SELECT(2);
3515 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3516 		return DDI_BUF_TRANS_SELECT(3);
3517 
3518 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3519 		return DDI_BUF_TRANS_SELECT(4);
3520 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3521 		return DDI_BUF_TRANS_SELECT(5);
3522 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3523 		return DDI_BUF_TRANS_SELECT(6);
3524 
3525 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3526 		return DDI_BUF_TRANS_SELECT(7);
3527 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3528 		return DDI_BUF_TRANS_SELECT(8);
3529 
3530 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3531 		return DDI_BUF_TRANS_SELECT(9);
3532 	default:
3533 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3534 			      "0x%x\n", signal_levels);
3535 		return DDI_BUF_TRANS_SELECT(0);
3536 	}
3537 }
3538 
3539 /* Update "DP" (the port register value) with the signal levels for the current platform and training set. */
3540 static void
3541 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3542 {
3543 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3544 	enum port port = intel_dig_port->port;
3545 	struct drm_device *dev = intel_dig_port->base.base.dev;
3546 	uint32_t signal_levels, mask;
3547 	uint8_t train_set = intel_dp->train_set[0];
3548 
3549 	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3550 		signal_levels = intel_hsw_signal_levels(train_set);
3551 		mask = DDI_BUF_EMP_MASK;
3552 	} else if (IS_CHERRYVIEW(dev)) {
3553 		signal_levels = intel_chv_signal_levels(intel_dp);
3554 		mask = 0;
3555 	} else if (IS_VALLEYVIEW(dev)) {
3556 		signal_levels = intel_vlv_signal_levels(intel_dp);
3557 		mask = 0;
3558 	} else if (IS_GEN7(dev) && port == PORT_A) {
3559 		signal_levels = intel_gen7_edp_signal_levels(train_set);
3560 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3561 	} else if (IS_GEN6(dev) && port == PORT_A) {
3562 		signal_levels = intel_gen6_edp_signal_levels(train_set);
3563 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3564 	} else {
3565 		signal_levels = intel_gen4_signal_levels(train_set);
3566 		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3567 	}
3568 
3569 	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3570 
3571 	*DP = (*DP & ~mask) | signal_levels;
3572 }
3573 
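/*
 * Program the given training pattern on both ends of the link: on the
 * source via the port register, and on the sink via the DPCD. Since
 * DP_TRAINING_LANEx_SET immediately follows DP_TRAINING_PATTERN_SET in the
 * DPCD, the pattern and the per-lane drive settings go out in a single AUX
 * write. Returns true if the whole DPCD write succeeded.
 */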
3574 static bool
3575 intel_dp_set_link_train(struct intel_dp *intel_dp,
3576 			uint32_t *DP,
3577 			uint8_t dp_train_pat)
3578 {
3579 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3580 	struct drm_device *dev = intel_dig_port->base.base.dev;
3581 	struct drm_i915_private *dev_priv = dev->dev_private;
3582 	uint8_t buf[sizeof(intel_dp->train_set) + 1];
3583 	int ret, len;
3584 
3585 	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3586 
3587 	I915_WRITE(intel_dp->output_reg, *DP);
3588 	POSTING_READ(intel_dp->output_reg);
3589 
3590 	buf[0] = dp_train_pat;
3591 	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3592 	    DP_TRAINING_PATTERN_DISABLE) {
3593 		/* don't write DP_TRAINING_LANEx_SET on disable */
3594 		len = 1;
3595 	} else {
3596 		/* DP_TRAINING_LANEx_SET registers follow DP_TRAINING_PATTERN_SET */
3597 		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3598 		len = intel_dp->lane_count + 1;
3599 	}
3600 
3601 	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3602 				buf, len);
3603 
3604 	return ret == len;
3605 }
3606 
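/*
 * Restart training from a clean slate: zero the training set (voltage
 * swing level 0, pre-emphasis level 0) before programming the pattern.
 */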
3607 static bool
3608 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3609 			uint8_t dp_train_pat)
3610 {
3611 	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3612 	intel_dp_set_signal_levels(intel_dp, DP);
3613 	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3614 }
3615 
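/*
 * One adjustment step during training: recompute the training set from
 * the sink's latest adjust requests, apply the new signal levels on the
 * source and push the per-lane values to the sink.
 */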
3616 static bool
3617 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3618 			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
3619 {
3620 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3621 	struct drm_device *dev = intel_dig_port->base.base.dev;
3622 	struct drm_i915_private *dev_priv = dev->dev_private;
3623 	int ret;
3624 
3625 	intel_get_adjust_train(intel_dp, link_status);
3626 	intel_dp_set_signal_levels(intel_dp, DP);
3627 
3628 	I915_WRITE(intel_dp->output_reg, *DP);
3629 	POSTING_READ(intel_dp->output_reg);
3630 
3631 	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3632 				intel_dp->train_set, intel_dp->lane_count);
3633 
3634 	return ret == intel_dp->lane_count;
3635 }
3636 
3637 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3638 {
3639 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3640 	struct drm_device *dev = intel_dig_port->base.base.dev;
3641 	struct drm_i915_private *dev_priv = dev->dev_private;
3642 	enum port port = intel_dig_port->port;
3643 	uint32_t val;
3644 
3645 	if (!HAS_DDI(dev))
3646 		return;
3647 
3648 	val = I915_READ(DP_TP_CTL(port));
3649 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3650 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3651 	I915_WRITE(DP_TP_CTL(port), val);
3652 
3653 	/*
3654 	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3655 	 * we need to set idle transmission mode is to work around a HW issue
3656 	 * where we enable the pipe while not in idle link-training mode.
3657 	 * In this case we have to wait for a minimum number of idle patterns
3658 	 * to be sent.
3659 	 */
3660 	if (port == PORT_A)
3661 		return;
3662 
3663 	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3664 		     1))
3665 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3666 }
3667 
3668 /* Enable the corresponding port and start training with pattern 1 */
3669 void
3670 intel_dp_start_link_train(struct intel_dp *intel_dp)
3671 {
3672 	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3673 	struct drm_device *dev = encoder->dev;
3674 	int i;
3675 	uint8_t voltage;
3676 	int voltage_tries, loop_tries;
3677 	uint32_t DP = intel_dp->DP;
3678 	uint8_t link_config[2];
3679 
3680 	if (HAS_DDI(dev))
3681 		intel_ddi_prepare_link_retrain(encoder);
3682 
3683 	/* Write the link configuration data */
3684 	link_config[0] = intel_dp->link_bw;
3685 	link_config[1] = intel_dp->lane_count;
3686 	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3687 		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3688 	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3689 	if (intel_dp->num_sink_rates)
3690 		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3691 				&intel_dp->rate_select, 1);
3692 
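	/*
	 * DP_DOWNSPREAD_CTRL and DP_MAIN_LINK_CHANNEL_CODING_SET are adjacent
	 * in the DPCD, so disable downspread and select ANSI 8B/10B coding
	 * with a single two-byte write.
	 */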
3693 	link_config[0] = 0;
3694 	link_config[1] = DP_SET_ANSI_8B10B;
3695 	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3696 
3697 	DP |= DP_PORT_EN;
3698 
3699 	/* clock recovery */
3700 	if (!intel_dp_reset_link_train(intel_dp, &DP,
3701 				       DP_TRAINING_PATTERN_1 |
3702 				       DP_LINK_SCRAMBLING_DISABLE)) {
3703 		DRM_ERROR("failed to enable link training\n");
3704 		return;
3705 	}
3706 
3707 	voltage = 0xff;
3708 	voltage_tries = 0;
3709 	loop_tries = 0;
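	/*
	 * Clock recovery loop: voltage_tries counts consecutive attempts at
	 * the same voltage swing, loop_tries counts full restarts after all
	 * lanes have hit max swing; hitting 5 on either one gives up.
	 */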
3710 	for (;;) {
3711 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3712 
3713 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3714 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3715 			DRM_ERROR("failed to get link status\n");
3716 			break;
3717 		}
3718 
3719 		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3720 			DRM_DEBUG_KMS("clock recovery OK\n");
3721 			break;
3722 		}
3723 
3724 		/* Check to see if we've tried the max voltage */
3725 		for (i = 0; i < intel_dp->lane_count; i++)
3726 			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3727 				break;
3728 		if (i == intel_dp->lane_count) {
3729 			++loop_tries;
3730 			if (loop_tries == 5) {
3731 				DRM_ERROR("too many full retries, give up\n");
3732 				break;
3733 			}
3734 			intel_dp_reset_link_train(intel_dp, &DP,
3735 						  DP_TRAINING_PATTERN_1 |
3736 						  DP_LINK_SCRAMBLING_DISABLE);
3737 			voltage_tries = 0;
3738 			continue;
3739 		}
3740 
3741 		/* Check to see if we've tried the same voltage 5 times */
3742 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3743 			++voltage_tries;
3744 			if (voltage_tries == 5) {
3745 				DRM_ERROR("too many voltage retries, give up\n");
3746 				break;
3747 			}
3748 		} else
3749 			voltage_tries = 0;
3750 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3751 
3752 		/* Update training set as requested by target */
3753 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3754 			DRM_ERROR("failed to update link training\n");
3755 			break;
3756 		}
3757 	}
3758 
3759 	intel_dp->DP = DP;
3760 }
3761 
3762 void
3763 intel_dp_complete_link_train(struct intel_dp *intel_dp)
3764 {
3765 	bool channel_eq = false;
3766 	int tries, cr_tries;
3767 	uint32_t DP = intel_dp->DP;
3768 	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3769 
3770 	/* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
3771 	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3772 		training_pattern = DP_TRAINING_PATTERN_3;
3773 
3774 	/* channel equalization */
3775 	if (!intel_dp_set_link_train(intel_dp, &DP,
3776 				     training_pattern |
3777 				     DP_LINK_SCRAMBLING_DISABLE)) {
3778 		DRM_ERROR("failed to start channel equalization\n");
3779 		return;
3780 	}
3781 
3782 	tries = 0;
3783 	cr_tries = 0;
3784 	channel_eq = false;
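	/*
	 * Channel equalization loop: tries counts EQ attempts with the
	 * current drive settings, cr_tries counts full retrains (after
	 * losing clock recovery or after too many EQ attempts); more than
	 * 5 retrains aborts.
	 */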
3785 	for (;;) {
3786 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3787 
3788 		if (cr_tries > 5) {
3789 			DRM_ERROR("failed to train DP, aborting\n");
3790 			break;
3791 		}
3792 
3793 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3794 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3795 			DRM_ERROR("failed to get link status\n");
3796 			break;
3797 		}
3798 
3799 		/* Make sure clock is still ok */
3800 		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3801 			intel_dp_start_link_train(intel_dp);
3802 			intel_dp_set_link_train(intel_dp, &DP,
3803 						training_pattern |
3804 						DP_LINK_SCRAMBLING_DISABLE);
3805 			cr_tries++;
3806 			continue;
3807 		}
3808 
3809 		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3810 			channel_eq = true;
3811 			break;
3812 		}
3813 
3814 		/* Try 5 times, then try clock recovery if that fails */
3815 		if (tries > 5) {
3816 			intel_dp_start_link_train(intel_dp);
3817 			intel_dp_set_link_train(intel_dp, &DP,
3818 						training_pattern |
3819 						DP_LINK_SCRAMBLING_DISABLE);
3820 			tries = 0;
3821 			cr_tries++;
3822 			continue;
3823 		}
3824 
3825 		/* Update training set as requested by target */
3826 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3827 			DRM_ERROR("failed to update link training\n");
3828 			break;
3829 		}
3830 		++tries;
3831 	}
3832 
3833 	intel_dp_set_idle_link_train(intel_dp);
3834 
3835 	intel_dp->DP = DP;
3836 
3837 	if (channel_eq)
3838 		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3839 
3840 }
3841 
3842 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3843 {
3844 	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3845 				DP_TRAINING_PATTERN_DISABLE);
3846 }
3847 
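/*
 * Bring the link down on pre-DDI platforms: switch the port to the idle
 * training pattern, apply the IBX transcoder-B deselect workaround if
 * needed, then disable the port and wait out the panel power-down delay.
 */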
3848 static void
3849 intel_dp_link_down(struct intel_dp *intel_dp)
3850 {
3851 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3852 	enum port port = intel_dig_port->port;
3853 	struct drm_device *dev = intel_dig_port->base.base.dev;
3854 	struct drm_i915_private *dev_priv = dev->dev_private;
3855 	uint32_t DP = intel_dp->DP;
3856 
3857 	if (WARN_ON(HAS_DDI(dev)))
3858 		return;
3859 
3860 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3861 		return;
3862 
3863 	DRM_DEBUG_KMS("\n");
3864 
3865 	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3866 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3867 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3868 	} else {
3869 		if (IS_CHERRYVIEW(dev))
3870 			DP &= ~DP_LINK_TRAIN_MASK_CHV;
3871 		else
3872 			DP &= ~DP_LINK_TRAIN_MASK;
3873 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3874 	}
3875 	POSTING_READ(intel_dp->output_reg);
3876 
3877 	if (HAS_PCH_IBX(dev) &&
3878 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3879 		/* Hardware workaround: leaving our transcoder select
3880 		 * set to transcoder B while it's off will prevent the
3881 		 * corresponding HDMI output on transcoder A.
3882 		 *
3883 		 * Combine this with another hardware workaround:
3884 		 * transcoder select bit can only be cleared while the
3885 		 * port is enabled.
3886 		 */
3887 		DP &= ~DP_PIPEB_SELECT;
3888 		I915_WRITE(intel_dp->output_reg, DP);
3889 		POSTING_READ(intel_dp->output_reg);
3890 	}
3891 
3892 	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3893 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3894 	POSTING_READ(intel_dp->output_reg);
3895 	msleep(intel_dp->panel_power_down_delay);
3896 }
3897 
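/*
 * Read and cache the sink's DPCD receiver capabilities, plus the optional
 * bits we care about: PSR support (eDP only), TPS3 support, the eDP 1.4
 * intermediate link-rate table and, for branch devices, the downstream
 * port info. Returns false if AUX fails or no sink is there.
 */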
3898 static bool
3899 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3900 {
3901 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3902 	struct drm_device *dev = dig_port->base.base.dev;
3903 	struct drm_i915_private *dev_priv = dev->dev_private;
3904 	uint8_t rev;
3905 
3906 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3907 				    sizeof(intel_dp->dpcd)) < 0)
3908 		return false; /* aux transfer failed */
3909 
3910 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3911 
3912 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3913 		return false; /* DPCD not present */
3914 
3915 	/* Check if the panel supports PSR */
3916 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3917 	if (is_edp(intel_dp)) {
3918 		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3919 					intel_dp->psr_dpcd,
3920 					sizeof(intel_dp->psr_dpcd));
3921 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3922 			dev_priv->psr.sink_support = true;
3923 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3924 		}
3925 	}
3926 
3927 	/* Training Pattern 3 support, both source and sink */
3928 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3929 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3930 	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3931 		intel_dp->use_tps3 = true;
3932 		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3933 	} else
3934 		intel_dp->use_tps3 = false;
3935 
3936 	/* Intermediate frequency support */
3937 	if (is_edp(intel_dp) &&
3938 	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3939 	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3940 	    (rev >= 0x03)) { /* eDP v1.4 or higher */
3941 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3942 		int i;
3943 
3944 		intel_dp_dpcd_read_wake(&intel_dp->aux,
3945 				DP_SUPPORTED_LINK_RATES,
3946 				sink_rates,
3947 				sizeof(sink_rates));
3948 
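		/*
		 * Example: a raw table entry of 8100 (in units of 200 kHz)
		 * encodes 8100 * 200 kHz = 1.62 GHz, and is stored below as
		 * 8100 * 200 / 10 = 162000 deca-kHz, matching gen9_rates[0].
		 */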
3949 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3950 			int val = le16_to_cpu(sink_rates[i]);
3951 
3952 			if (val == 0)
3953 				break;
3954 
3955 			/* Value read is in units of 200 kHz while the drm clock is saved in deca-kHz */
3956 			intel_dp->sink_rates[i] = (val * 200) / 10;
3957 		}
3958 		intel_dp->num_sink_rates = i;
3959 	}
3960 
3961 	intel_dp_print_rates(intel_dp);
3962 
3963 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3964 	      DP_DWN_STRM_PORT_PRESENT))
3965 		return true; /* native DP sink */
3966 
3967 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3968 		return true; /* no per-port downstream info */
3969 
3970 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3971 				    intel_dp->downstream_ports,
3972 				    DP_MAX_DOWNSTREAM_PORTS) < 0)
3973 		return false; /* downstream port status fetch failed */
3974 
3975 	return true;
3976 }
3977 
3978 static void
3979 intel_dp_probe_oui(struct intel_dp *intel_dp)
3980 {
3981 	u8 buf[3];
3982 
3983 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3984 		return;
3985 
3986 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3987 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3988 			      buf[0], buf[1], buf[2]);
3989 
3990 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3991 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3992 			      buf[0], buf[1], buf[2]);
3993 }
3994 
3995 #if 0
3996 static bool
3997 intel_dp_probe_mst(struct intel_dp *intel_dp)
3998 {
3999 	u8 buf[1];
4000 
4001 	if (!intel_dp->can_mst)
4002 		return false;
4003 
4004 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4005 		return false;
4006 
4007 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4008 		if (buf[0] & DP_MST_CAP) {
4009 			DRM_DEBUG_KMS("Sink is MST capable\n");
4010 			intel_dp->is_mst = true;
4011 		} else {
4012 			DRM_DEBUG_KMS("Sink is not MST capable\n");
4013 			intel_dp->is_mst = false;
4014 		}
4015 	}
4016 
4017 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4018 	return intel_dp->is_mst;
4019 }
4020 #endif
4021 
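/*
 * Fetch a frame CRC computed by the sink's DP test hardware: start the
 * test sink, wait up to 6 vblanks for TEST_CRC_COUNT to advance, read the
 * six CRC bytes starting at DP_TEST_CRC_R_CR, then stop the test sink
 * again.
 */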
4022 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4023 {
4024 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4025 	struct drm_device *dev = intel_dig_port->base.base.dev;
4026 	struct intel_crtc *intel_crtc =
4027 		to_intel_crtc(intel_dig_port->base.base.crtc);
4028 	u8 buf;
4029 	int test_crc_count;
4030 	int attempts = 6;
4031 
4032 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4033 		return -EIO;
4034 
4035 	if (!(buf & DP_TEST_CRC_SUPPORTED))
4036 		return -ENOTTY;
4037 
4038 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4039 		return -EIO;
4040 
4041 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4042 				buf | DP_TEST_SINK_START) < 0)
4043 		return -EIO;
4044 
4045 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4046 		return -EIO;
4047 	test_crc_count = buf & DP_TEST_COUNT_MASK;
4048 
4049 	do {
4050 		if (drm_dp_dpcd_readb(&intel_dp->aux,
4051 				      DP_TEST_SINK_MISC, &buf) < 0)
4052 			return -EIO;
4053 		intel_wait_for_vblank(dev, intel_crtc->pipe);
4054 	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4055 
4056 	if (attempts == 0) {
4057 		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4058 		return -ETIMEDOUT;
4059 	}
4060 
4061 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
4062 		return -EIO;
4063 
4064 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4065 		return -EIO;
4066 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4067 			       buf & ~DP_TEST_SINK_START) < 0)
4068 		return -EIO;
4069 
4070 	return 0;
4071 }
4072 
4073 static bool
4074 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4075 {
4076 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
4077 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
4078 				       sink_irq_vector, 1) == 1;
4079 }
4080 
4081 static void
4082 intel_dp_handle_test_request(struct intel_dp *intel_dp)
4083 {
4084 	/* NAK by default */
4085 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
4086 }
4087 
4088 #if 0
4089 static int
4090 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4091 {
4092 	bool bret;
4093 
4094 	if (intel_dp->is_mst) {
4095 		u8 esi[16] = { 0 };
4096 		int ret = 0;
4097 		int retry;
4098 		bool handled;
4099 		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4100 go_again:
4101 		if (bret == true) {
4102 
4103 			/* check link status - esi[10] = 0x200c */
4104 			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4105 				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4106 				intel_dp_start_link_train(intel_dp);
4107 				intel_dp_complete_link_train(intel_dp);
4108 				intel_dp_stop_link_train(intel_dp);
4109 			}
4110 
4111 			DRM_DEBUG_KMS("got esi %3ph\n", esi);
4112 			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4113 
4114 			if (handled) {
4115 				for (retry = 0; retry < 3; retry++) {
4116 					int wret;
4117 					wret = drm_dp_dpcd_write(&intel_dp->aux,
4118 								 DP_SINK_COUNT_ESI+1,
4119 								 &esi[1], 3);
4120 					if (wret == 3) {
4121 						break;
4122 					}
4123 				}
4124 
4125 				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4126 				if (bret == true) {
4127 					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4128 					goto go_again;
4129 				}
4130 			} else
4131 				ret = 0;
4132 
4133 			return ret;
4134 		} else {
4135 			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4136 			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4137 			intel_dp->is_mst = false;
4138 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4139 			/* send a hotplug event */
4140 			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4141 		}
4142 	}
4143 	return -EINVAL;
4144 }
4145 #endif
4146 
4147 /*
4148  * According to DP spec
4149  * 5.1.2:
4150  *  1. Read DPCD
4151  *  2. Configure link according to Receiver Capabilities
4152  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4153  *  4. Check link status on receipt of hot-plug interrupt
4154  */
4155 static void
4156 intel_dp_check_link_status(struct intel_dp *intel_dp)
4157 {
4158 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4159 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4160 	u8 sink_irq_vector;
4161 	u8 link_status[DP_LINK_STATUS_SIZE];
4162 
4163 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4164 
4165 	if (!intel_encoder->connectors_active)
4166 		return;
4167 
4168 	if (WARN_ON(!intel_encoder->base.crtc))
4169 		return;
4170 
4171 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4172 		return;
4173 
4174 	/* Try to read receiver status if the link appears to be up */
4175 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
4176 		return;
4177 	}
4178 
4179 	/* Now read the DPCD to see if it's actually running */
4180 	if (!intel_dp_get_dpcd(intel_dp)) {
4181 		return;
4182 	}
4183 
4184 	/* Try to read the source of the interrupt */
4185 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4186 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4187 		/* Clear interrupt source */
4188 		drm_dp_dpcd_writeb(&intel_dp->aux,
4189 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4190 				   sink_irq_vector);
4191 
4192 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4193 			intel_dp_handle_test_request(intel_dp);
4194 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4195 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4196 	}
4197 
4198 	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4199 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4200 			      intel_encoder->base.name);
4201 		intel_dp_start_link_train(intel_dp);
4202 		intel_dp_complete_link_train(intel_dp);
4203 		intel_dp_stop_link_train(intel_dp);
4204 	}
4205 }
4206 
4207 /* XXX this is probably wrong for multiple downstream ports */
4208 static enum drm_connector_status
4209 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4210 {
4211 	uint8_t *dpcd = intel_dp->dpcd;
4212 	uint8_t type;
4213 
4214 	if (!intel_dp_get_dpcd(intel_dp))
4215 		return connector_status_disconnected;
4216 
4217 	/* if there's no downstream port, we're done */
4218 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4219 		return connector_status_connected;
4220 
4221 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4222 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4223 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4224 		uint8_t reg;
4225 
4226 		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4227 					    &reg, 1) < 0)
4228 			return connector_status_unknown;
4229 
4230 		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4231 					      : connector_status_disconnected;
4232 	}
4233 
4234 	/* If no HPD, poke DDC gently */
4235 	if (drm_probe_ddc(intel_dp->aux.ddc))
4236 		return connector_status_connected;
4237 
4238 	/* Well we tried, say unknown for unreliable port types */
4239 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4240 		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4241 		if (type == DP_DS_PORT_TYPE_VGA ||
4242 		    type == DP_DS_PORT_TYPE_NON_EDID)
4243 			return connector_status_unknown;
4244 	} else {
4245 		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4246 			DP_DWN_STRM_PORT_TYPE_MASK;
4247 		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4248 		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
4249 			return connector_status_unknown;
4250 	}
4251 
4252 	/* Anything else is out of spec, warn and ignore */
4253 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4254 	return connector_status_disconnected;
4255 }
4256 
4257 static enum drm_connector_status
4258 edp_detect(struct intel_dp *intel_dp)
4259 {
4260 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4261 	enum drm_connector_status status;
4262 
4263 	status = intel_panel_detect(dev);
4264 	if (status == connector_status_unknown)
4265 		status = connector_status_connected;
4266 
4267 	return status;
4268 }
4269 
4270 static enum drm_connector_status
4271 ironlake_dp_detect(struct intel_dp *intel_dp)
4272 {
4273 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4274 	struct drm_i915_private *dev_priv = dev->dev_private;
4275 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4276 
4277 	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4278 		return connector_status_disconnected;
4279 
4280 	return intel_dp_detect_dpcd(intel_dp);
4281 }
4282 
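/*
 * Check the port's live-status bit in PORT_HOTPLUG_STAT: returns 1 if a
 * sink is detected, 0 if not, and -EINVAL for ports that have no
 * live-status bit.
 */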
4283 static int g4x_digital_port_connected(struct drm_device *dev,
4284 				       struct intel_digital_port *intel_dig_port)
4285 {
4286 	struct drm_i915_private *dev_priv = dev->dev_private;
4287 	uint32_t bit;
4288 
4289 	if (IS_VALLEYVIEW(dev)) {
4290 		switch (intel_dig_port->port) {
4291 		case PORT_B:
4292 			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4293 			break;
4294 		case PORT_C:
4295 			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4296 			break;
4297 		case PORT_D:
4298 			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4299 			break;
4300 		default:
4301 			return -EINVAL;
4302 		}
4303 	} else {
4304 		switch (intel_dig_port->port) {
4305 		case PORT_B:
4306 			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4307 			break;
4308 		case PORT_C:
4309 			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4310 			break;
4311 		case PORT_D:
4312 			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4313 			break;
4314 		default:
4315 			return -EINVAL;
4316 		}
4317 	}
4318 
4319 	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
4320 		return 0;
4321 	return 1;
4322 }
4323 
4324 static enum drm_connector_status
4325 g4x_dp_detect(struct intel_dp *intel_dp)
4326 {
4327 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4328 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4329 	int ret;
4330 
4331 	/* Can't disconnect eDP, but you can close the lid... */
4332 	if (is_edp(intel_dp)) {
4333 		enum drm_connector_status status;
4334 
4335 		status = intel_panel_detect(dev);
4336 		if (status == connector_status_unknown)
4337 			status = connector_status_connected;
4338 		return status;
4339 	}
4340 
4341 	ret = g4x_digital_port_connected(dev, intel_dig_port);
4342 	if (ret == -EINVAL)
4343 		return connector_status_unknown;
4344 	else if (ret == 0)
4345 		return connector_status_disconnected;
4346 
4347 	return intel_dp_detect_dpcd(intel_dp);
4348 }
4349 
4350 static struct edid *
4351 intel_dp_get_edid(struct intel_dp *intel_dp)
4352 {
4353 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4354 
4355 	/* use cached edid if we have one */
4356 	if (intel_connector->edid) {
4357 		/* invalid edid */
4358 		if (IS_ERR(intel_connector->edid))
4359 			return NULL;
4360 
4361 		return drm_edid_duplicate(intel_connector->edid);
4362 	} else
4363 		return drm_get_edid(&intel_connector->base,
4364 				    intel_dp->aux.ddc);
4365 }
4366 
4367 static void
4368 intel_dp_set_edid(struct intel_dp *intel_dp)
4369 {
4370 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4371 	struct edid *edid;
4372 
4373 	edid = intel_dp_get_edid(intel_dp);
4374 	intel_connector->detect_edid = edid;
4375 
4376 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4377 		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4378 	else
4379 		intel_dp->has_audio = drm_detect_monitor_audio(edid);
4380 }
4381 
4382 static void
4383 intel_dp_unset_edid(struct intel_dp *intel_dp)
4384 {
4385 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4386 
4387 	kfree(intel_connector->detect_edid);
4388 	intel_connector->detect_edid = NULL;
4389 
4390 	intel_dp->has_audio = false;
4391 }
4392 
4393 static enum intel_display_power_domain
4394 intel_dp_power_get(struct intel_dp *dp)
4395 {
4396 	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4397 	enum intel_display_power_domain power_domain;
4398 
4399 	power_domain = intel_display_port_power_domain(encoder);
4400 	intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4401 
4402 	return power_domain;
4403 }
4404 
4405 static void
4406 intel_dp_power_put(struct intel_dp *dp,
4407 		   enum intel_display_power_domain power_domain)
4408 {
4409 	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4410 	intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4411 }
4412 
4413 static enum drm_connector_status
4414 intel_dp_detect(struct drm_connector *connector, bool force)
4415 {
4416 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4417 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4418 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4419 	struct drm_device *dev = connector->dev;
4420 	enum drm_connector_status status;
4421 	enum intel_display_power_domain power_domain;
4422 
4423 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4424 		      connector->base.id, connector->name);
4425 	intel_dp_unset_edid(intel_dp);
4426 
4427 	if (intel_dp->is_mst) {
4428 		/* MST devices are disconnected from a monitor POV */
4429 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4430 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4431 		return connector_status_disconnected;
4432 	}
4433 
4434 	power_domain = intel_dp_power_get(intel_dp);
4435 
4436 	/* Can't disconnect eDP, but you can close the lid... */
4437 	if (is_edp(intel_dp))
4438 		status = edp_detect(intel_dp);
4439 	else if (HAS_PCH_SPLIT(dev))
4440 		status = ironlake_dp_detect(intel_dp);
4441 	else
4442 		status = g4x_dp_detect(intel_dp);
4443 	if (status != connector_status_connected)
4444 		goto out;
4445 
4446 	intel_dp_probe_oui(intel_dp);
4447 
4448 #if 0
4449 	ret = intel_dp_probe_mst(intel_dp);
4450 	if (ret) {
4451 		/* if we are in MST mode then this connector
4452 		   won't appear connected and won't report any EDID */
4453 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4454 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4455 		status = connector_status_disconnected;
4456 		goto out;
4457 	}
4458 #endif
4459 
4460 	intel_dp_set_edid(intel_dp);
4461 
4462 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4463 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4464 	status = connector_status_connected;
4465 
4466 out:
4467 	intel_dp_power_put(intel_dp, power_domain);
4468 	return status;
4469 }
4470 
4471 static void
4472 intel_dp_force(struct drm_connector *connector)
4473 {
4474 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4475 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4476 	enum intel_display_power_domain power_domain;
4477 
4478 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4479 		      connector->base.id, connector->name);
4480 	intel_dp_unset_edid(intel_dp);
4481 
4482 	if (connector->status != connector_status_connected)
4483 		return;
4484 
4485 	power_domain = intel_dp_power_get(intel_dp);
4486 
4487 	intel_dp_set_edid(intel_dp);
4488 
4489 	intel_dp_power_put(intel_dp, power_domain);
4490 
4491 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4492 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4493 }
4494 
4495 static int intel_dp_get_modes(struct drm_connector *connector)
4496 {
4497 	struct intel_connector *intel_connector = to_intel_connector(connector);
4498 	struct edid *edid;
4499 
4500 	edid = intel_connector->detect_edid;
4501 	if (edid) {
4502 		int ret = intel_connector_update_modes(connector, edid);
4503 		if (ret)
4504 			return ret;
4505 	}
4506 
4507 	/* if eDP has no EDID, fall back to fixed mode */
4508 	if (is_edp(intel_attached_dp(connector)) &&
4509 	    intel_connector->panel.fixed_mode) {
4510 		struct drm_display_mode *mode;
4511 
4512 		mode = drm_mode_duplicate(connector->dev,
4513 					  intel_connector->panel.fixed_mode);
4514 		if (mode) {
4515 			drm_mode_probed_add(connector, mode);
4516 			return 1;
4517 		}
4518 	}
4519 
4520 	return 0;
4521 }
4522 
4523 static bool
4524 intel_dp_detect_audio(struct drm_connector *connector)
4525 {
4526 	bool has_audio = false;
4527 	struct edid *edid;
4528 
4529 	edid = to_intel_connector(connector)->detect_edid;
4530 	if (edid)
4531 		has_audio = drm_detect_monitor_audio(edid);
4532 
4533 	return has_audio;
4534 }
4535 
4536 static int
4537 intel_dp_set_property(struct drm_connector *connector,
4538 		      struct drm_property *property,
4539 		      uint64_t val)
4540 {
4541 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4542 	struct intel_connector *intel_connector = to_intel_connector(connector);
4543 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4544 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4545 	int ret;
4546 
4547 	ret = drm_object_property_set_value(&connector->base, property, val);
4548 	if (ret)
4549 		return ret;
4550 
4551 	if (property == dev_priv->force_audio_property) {
4552 		int i = val;
4553 		bool has_audio;
4554 
4555 		if (i == intel_dp->force_audio)
4556 			return 0;
4557 
4558 		intel_dp->force_audio = i;
4559 
4560 		if (i == HDMI_AUDIO_AUTO)
4561 			has_audio = intel_dp_detect_audio(connector);
4562 		else
4563 			has_audio = (i == HDMI_AUDIO_ON);
4564 
4565 		if (has_audio == intel_dp->has_audio)
4566 			return 0;
4567 
4568 		intel_dp->has_audio = has_audio;
4569 		goto done;
4570 	}
4571 
4572 	if (property == dev_priv->broadcast_rgb_property) {
4573 		bool old_auto = intel_dp->color_range_auto;
4574 		uint32_t old_range = intel_dp->color_range;
4575 
4576 		switch (val) {
4577 		case INTEL_BROADCAST_RGB_AUTO:
4578 			intel_dp->color_range_auto = true;
4579 			break;
4580 		case INTEL_BROADCAST_RGB_FULL:
4581 			intel_dp->color_range_auto = false;
4582 			intel_dp->color_range = 0;
4583 			break;
4584 		case INTEL_BROADCAST_RGB_LIMITED:
4585 			intel_dp->color_range_auto = false;
4586 			intel_dp->color_range = DP_COLOR_RANGE_16_235;
4587 			break;
4588 		default:
4589 			return -EINVAL;
4590 		}
4591 
4592 		if (old_auto == intel_dp->color_range_auto &&
4593 		    old_range == intel_dp->color_range)
4594 			return 0;
4595 
4596 		goto done;
4597 	}
4598 
4599 	if (is_edp(intel_dp) &&
4600 	    property == connector->dev->mode_config.scaling_mode_property) {
4601 		if (val == DRM_MODE_SCALE_NONE) {
4602 			DRM_DEBUG_KMS("no scaling (DRM_MODE_SCALE_NONE) not supported\n");
4603 			return -EINVAL;
4604 		}
4605 
4606 		if (intel_connector->panel.fitting_mode == val) {
4607 			/* the eDP scaling property is not changed */
4608 			return 0;
4609 		}
4610 		intel_connector->panel.fitting_mode = val;
4611 
4612 		goto done;
4613 	}
4614 
4615 	return -EINVAL;
4616 
4617 done:
4618 	if (intel_encoder->base.crtc)
4619 		intel_crtc_restore_mode(intel_encoder->base.crtc);
4620 
4621 	return 0;
4622 }
4623 
4624 static void
4625 intel_dp_connector_destroy(struct drm_connector *connector)
4626 {
4627 	struct intel_connector *intel_connector = to_intel_connector(connector);
4628 
4629 	kfree(intel_connector->detect_edid);
4630 
4631 	if (!IS_ERR_OR_NULL(intel_connector->edid))
4632 		kfree(intel_connector->edid);
4633 
4634 	/* Can't call is_edp() since the encoder may have been destroyed
4635 	 * already. */
4636 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4637 		intel_panel_fini(&intel_connector->panel);
4638 
4639 	drm_connector_cleanup(connector);
4640 	kfree(connector);
4641 }
4642 
4643 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4644 {
4645 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4646 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4647 
4648 	drm_dp_aux_unregister(&intel_dp->aux);
4649 	intel_dp_mst_encoder_cleanup(intel_dig_port);
4650 	if (is_edp(intel_dp)) {
4651 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4652 		/*
4653 		 * vdd might still be enabled due to the delayed vdd off.
4654 		 * Make sure vdd is actually turned off here.
4655 		 */
4656 		pps_lock(intel_dp);
4657 		edp_panel_vdd_off_sync(intel_dp);
4658 		pps_unlock(intel_dp);
4659 
4660 #if 0
4661 		if (intel_dp->edp_notifier.notifier_call) {
4662 			unregister_reboot_notifier(&intel_dp->edp_notifier);
4663 			intel_dp->edp_notifier.notifier_call = NULL;
4664 		}
4665 #endif
4666 	}
4667 	drm_encoder_cleanup(encoder);
4668 	kfree(intel_dig_port);
4669 }
4670 
4671 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4672 {
4673 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4674 
4675 	if (!is_edp(intel_dp))
4676 		return;
4677 
4678 	/*
4679 	 * vdd might still be enabled due to the delayed vdd off.
4680 	 * Make sure vdd is actually turned off here.
4681 	 */
4682 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4683 	pps_lock(intel_dp);
4684 	edp_panel_vdd_off_sync(intel_dp);
4685 	pps_unlock(intel_dp);
4686 }
4687 
4688 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4689 {
4690 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4691 	struct drm_device *dev = intel_dig_port->base.base.dev;
4692 	struct drm_i915_private *dev_priv = dev->dev_private;
4693 	enum intel_display_power_domain power_domain;
4694 
4695 	lockdep_assert_held(&dev_priv->pps_mutex);
4696 
4697 	if (!edp_have_panel_vdd(intel_dp))
4698 		return;
4699 
4700 	/*
4701 	 * The VDD bit needs a power domain reference, so if the bit is
4702 	 * already enabled when we boot or resume, grab this reference and
4703 	 * schedule a vdd off, so we don't hold on to the reference
4704 	 * indefinitely.
4705 	 */
4706 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4707 	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4708 	intel_display_power_get(dev_priv, power_domain);
4709 
4710 	edp_panel_vdd_schedule_off(intel_dp);
4711 }
4712 
4713 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4714 {
4715 	struct intel_dp *intel_dp;
4716 
4717 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4718 		return;
4719 
4720 	intel_dp = enc_to_intel_dp(encoder);
4721 
4722 	pps_lock(intel_dp);
4723 
4724 	/*
4725 	 * Read out the current power sequencer assignment,
4726 	 * in case the BIOS did something with it.
4727 	 */
4728 	if (IS_VALLEYVIEW(encoder->dev))
4729 		vlv_initial_power_sequencer_setup(intel_dp);
4730 
4731 	intel_edp_panel_vdd_sanitize(intel_dp);
4732 
4733 	pps_unlock(intel_dp);
4734 }
4735 
4736 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4737 	.dpms = intel_connector_dpms,
4738 	.detect = intel_dp_detect,
4739 	.force = intel_dp_force,
4740 	.fill_modes = drm_helper_probe_single_connector_modes,
4741 	.set_property = intel_dp_set_property,
4742 	.atomic_get_property = intel_connector_atomic_get_property,
4743 	.destroy = intel_dp_connector_destroy,
4744 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4745 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4746 };
4747 
4748 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4749 	.get_modes = intel_dp_get_modes,
4750 	.mode_valid = intel_dp_mode_valid,
4751 	.best_encoder = intel_best_encoder,
4752 };
4753 
4754 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4755 	.reset = intel_dp_encoder_reset,
4756 	.destroy = intel_dp_encoder_destroy,
4757 };
4758 
4759 void
4760 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4761 {
4762 	return;
4763 }
4764 
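/*
 * HPD pulse handler. A long pulse means connect/disconnect, so re-read the
 * DPCD and drop back out of MST mode if the device went away; a short
 * pulse is a sink IRQ, so check the link status immediately instead of
 * waiting for the normal hotplug path. Long pulses on eDP are ignored (see
 * below) and return false; all other cases return true.
 */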
4765 bool
4766 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4767 {
4768 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4769 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4770 	struct drm_device *dev = intel_dig_port->base.base.dev;
4771 	struct drm_i915_private *dev_priv = dev->dev_private;
4772 	enum intel_display_power_domain power_domain;
4773 	bool ret = true;
4774 
4775 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4776 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4777 
4778 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4779 		/*
4780 		 * vdd off can generate a long pulse on eDP which
4781 		 * would require vdd on to handle it, and thus we
4782 		 * would end up in an endless cycle of
4783 		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4784 		 */
4785 		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4786 			      port_name(intel_dig_port->port));
4787 		return false;
4788 	}
4789 
4790 	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4791 		      port_name(intel_dig_port->port),
4792 		      long_hpd ? "long" : "short");
4793 
4794 	power_domain = intel_display_port_power_domain(intel_encoder);
4795 	intel_display_power_get(dev_priv, power_domain);
4796 
4797 	if (long_hpd) {
4798 		ret = true;
4799 
4800 		if (HAS_PCH_SPLIT(dev)) {
4801 			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4802 				goto mst_fail;
4803 		} else {
4804 			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4805 				goto mst_fail;
4806 		}
4807 
4808 		if (!intel_dp_get_dpcd(intel_dp)) {
4809 			goto mst_fail;
4810 		}
4811 
4812 		intel_dp_probe_oui(intel_dp);
4813 
4814 #if 0
4815 		if (!intel_dp_probe_mst(intel_dp))
4816 			goto mst_fail;
4817 #endif
4818 
4819 	} else {
4820 		if (intel_dp->is_mst) {
4821 #if 0
4822 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4823 				goto mst_fail;
4824 #endif
4825 		}
4826 
4827 		if (!intel_dp->is_mst) {
4828 			/*
4829 			 * we'll check the link status via the normal hot plug path later -
4830 			 * but for short hpds we should check it now
4831 			 */
4832 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4833 			intel_dp_check_link_status(intel_dp);
4834 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
4835 		}
4836 	}
4837 
4838 	goto put_power;
4839 mst_fail:
4840 	/* if we were in MST mode, and the device is no longer there, get out of MST mode */
4841 	if (intel_dp->is_mst) {
4842 		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4843 		intel_dp->is_mst = false;
4844 #if 0
4845 		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4846 #endif
4847 	}
4848 put_power:
4849 	intel_display_power_put(dev_priv, power_domain);
4850 
4851 	return ret;
4852 }
4853 
4854 /* Return which DP Port should be selected for Transcoder DP control */
4855 int
4856 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4857 {
4858 	struct drm_device *dev = crtc->dev;
4859 	struct intel_encoder *intel_encoder;
4860 	struct intel_dp *intel_dp;
4861 
4862 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4863 		intel_dp = enc_to_intel_dp(&intel_encoder->base);
4864 
4865 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4866 		    intel_encoder->type == INTEL_OUTPUT_EDP)
4867 			return intel_dp->output_reg;
4868 	}
4869 
4870 	return -1;
4871 }
4872 
4873 /* check the VBT to see whether the eDP is on a DP-B/C/D port (DP-A is always eDP) */
4874 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4875 {
4876 	struct drm_i915_private *dev_priv = dev->dev_private;
4877 	union child_device_config *p_child;
4878 	int i;
4879 	static const short port_mapping[] = {
4880 		[PORT_B] = PORT_IDPB,
4881 		[PORT_C] = PORT_IDPC,
4882 		[PORT_D] = PORT_IDPD,
4883 	};
4884 
4885 	if (port == PORT_A)
4886 		return true;
4887 
4888 	if (!dev_priv->vbt.child_dev_num)
4889 		return false;
4890 
4891 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4892 		p_child = dev_priv->vbt.child_dev + i;
4893 
4894 		if (p_child->common.dvo_port == port_mapping[port] &&
4895 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4896 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4897 			return true;
4898 	}
4899 	return false;
4900 }
4901 
4902 void
4903 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4904 {
4905 	struct intel_connector *intel_connector = to_intel_connector(connector);
4906 
4907 	intel_attach_force_audio_property(connector);
4908 	intel_attach_broadcast_rgb_property(connector);
4909 	intel_dp->color_range_auto = true;
4910 
4911 	if (is_edp(intel_dp)) {
4912 		drm_mode_create_scaling_mode_property(connector->dev);
4913 		drm_object_attach_property(
4914 			&connector->base,
4915 			connector->dev->mode_config.scaling_mode_property,
4916 			DRM_MODE_SCALE_ASPECT);
4917 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4918 	}
4919 }
4920 
4921 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4922 {
4923 	intel_dp->last_power_cycle = jiffies;
4924 	intel_dp->last_power_on = jiffies;
4925 	intel_dp->last_backlight_off = jiffies;
4926 }
4927 
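/*
 * Work out the panel power sequencing delays once per panel: read the
 * delays currently programmed in the PPS registers and the ones from the
 * VBT, take the max of the two for each field, and fall back to the eDP
 * spec limits when both are unset.
 */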
4928 static void
4929 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4930 				    struct intel_dp *intel_dp)
4931 {
4932 	struct drm_i915_private *dev_priv = dev->dev_private;
4933 	struct edp_power_seq cur, vbt, spec,
4934 		*final = &intel_dp->pps_delays;
4935 	u32 pp_on, pp_off, pp_div, pp;
4936 	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4937 
4938 	lockdep_assert_held(&dev_priv->pps_mutex);
4939 
4940 	/* already initialized? */
4941 	if (final->t11_t12 != 0)
4942 		return;
4943 
4944 	if (HAS_PCH_SPLIT(dev)) {
4945 		pp_ctrl_reg = PCH_PP_CONTROL;
4946 		pp_on_reg = PCH_PP_ON_DELAYS;
4947 		pp_off_reg = PCH_PP_OFF_DELAYS;
4948 		pp_div_reg = PCH_PP_DIVISOR;
4949 	} else {
4950 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4951 
4952 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4953 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4954 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4955 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4956 	}
4957 
4958 	/* Workaround: Need to write PP_CONTROL with the unlock key as
4959 	 * the very first thing. */
4960 	pp = ironlake_get_pp_control(intel_dp);
4961 	I915_WRITE(pp_ctrl_reg, pp);
4962 
4963 	pp_on = I915_READ(pp_on_reg);
4964 	pp_off = I915_READ(pp_off_reg);
4965 	pp_div = I915_READ(pp_div_reg);
4966 
4967 	/* Pull timing values out of registers */
4968 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4969 		PANEL_POWER_UP_DELAY_SHIFT;
4970 
4971 	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4972 		PANEL_LIGHT_ON_DELAY_SHIFT;
4973 
4974 	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4975 		PANEL_LIGHT_OFF_DELAY_SHIFT;
4976 
4977 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4978 		PANEL_POWER_DOWN_DELAY_SHIFT;
4979 
4980 	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4981 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4982 
4983 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4984 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4985 
4986 	vbt = dev_priv->vbt.edp_pps;
4987 
4988 	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4989 	 * our hw here, which are all in 100usec. */
4990 	spec.t1_t3 = 210 * 10;
4991 	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4992 	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4993 	spec.t10 = 500 * 10;
4994 	/* This one is special and actually in units of 100ms, but zero
4995 	 * based in the hw (so we need to add 100 ms). But the sw vbt
4996 	 * table multiplies it by 1000 to make it in units of 100usec,
4997 	 * too. */
4998 	spec.t11_t12 = (510 + 100) * 10;
4999 
5000 	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5001 		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5002 
5003 	/* Use the max of the register settings and vbt. If both are
5004 	 * unset, fall back to the spec limits. */
5005 #define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
5006 				       spec.field : \
5007 				       max(cur.field, vbt.field))
5008 	assign_final(t1_t3);
5009 	assign_final(t8);
5010 	assign_final(t9);
5011 	assign_final(t10);
5012 	assign_final(t11_t12);
5013 #undef assign_final
5014 
5015 #define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
5016 	intel_dp->panel_power_up_delay = get_delay(t1_t3);
5017 	intel_dp->backlight_on_delay = get_delay(t8);
5018 	intel_dp->backlight_off_delay = get_delay(t9);
5019 	intel_dp->panel_power_down_delay = get_delay(t10);
5020 	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5021 #undef get_delay
5022 
5023 	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5024 		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5025 		      intel_dp->panel_power_cycle_delay);
5026 
5027 	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5028 		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5029 }
5030 
5031 static void
5032 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5033 					      struct intel_dp *intel_dp)
5034 {
5035 	struct drm_i915_private *dev_priv = dev->dev_private;
5036 	u32 pp_on, pp_off, pp_div, port_sel = 0;
5037 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5038 	int pp_on_reg, pp_off_reg, pp_div_reg;
5039 	enum port port = dp_to_dig_port(intel_dp)->port;
5040 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
5041 
5042 	lockdep_assert_held(&dev_priv->pps_mutex);
5043 
5044 	if (HAS_PCH_SPLIT(dev)) {
5045 		pp_on_reg = PCH_PP_ON_DELAYS;
5046 		pp_off_reg = PCH_PP_OFF_DELAYS;
5047 		pp_div_reg = PCH_PP_DIVISOR;
5048 	} else {
5049 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5050 
5051 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5052 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5053 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5054 	}
5055 
5056 	/*
5057 	 * And finally store the new values in the power sequencer. The
5058 	 * backlight delays are set to 1 because we do manual waits on them. For
5059 	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5060 	 * we'll end up waiting for the backlight off delay twice: once when we
5061 	 * do the manual sleep, and once when we disable the panel and wait for
5062 	 * the PP_STATUS bit to become zero.
5063 	 */
5064 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5065 		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5066 	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5067 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5068 	/* Compute the divisor for the pp clock, simply match the Bspec
5069 	 * formula. */
5070 	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5071 	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5072 			<< PANEL_POWER_CYCLE_DELAY_SHIFT);
5073 
5074 	/* Haswell doesn't have any port selection bits for the panel
5075 	 * power sequencer any more. */
5076 	if (IS_VALLEYVIEW(dev)) {
5077 		port_sel = PANEL_PORT_SELECT_VLV(port);
5078 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5079 		if (port == PORT_A)
5080 			port_sel = PANEL_PORT_SELECT_DPA;
5081 		else
5082 			port_sel = PANEL_PORT_SELECT_DPD;
5083 	}
5084 
5085 	pp_on |= port_sel;
5086 
5087 	I915_WRITE(pp_on_reg, pp_on);
5088 	I915_WRITE(pp_off_reg, pp_off);
5089 	I915_WRITE(pp_div_reg, pp_div);
5090 
5091 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5092 		      I915_READ(pp_on_reg),
5093 		      I915_READ(pp_off_reg),
5094 		      I915_READ(pp_div_reg));
5095 }
5096 
5097 /**
5098  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5099  * @dev: DRM device
5100  * @refresh_rate: RR to be programmed
5101  *
5102  * This function gets called when refresh rate (RR) has to be changed from
5103  * one frequency to another. Switches can be between high and low RR
5104  * supported by the panel or to any other RR based on media playback (in
5105  * this case, RR value needs to be passed from user space).
5106  *
5107  * The caller of this function needs to take a lock on dev_priv->drrs.
5108  */
5109 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5110 {
5111 	struct drm_i915_private *dev_priv = dev->dev_private;
5112 	struct intel_encoder *encoder;
5113 	struct intel_digital_port *dig_port = NULL;
5114 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5115 	struct intel_crtc_state *config = NULL;
5116 	struct intel_crtc *intel_crtc = NULL;
5117 	u32 reg, val;
5118 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5119 
5120 	if (refresh_rate <= 0) {
5121 		DRM_DEBUG_KMS("Refresh rate should be positive.\n");
5122 		return;
5123 	}
5124 
5125 	if (intel_dp == NULL) {
5126 		DRM_DEBUG_KMS("DRRS not supported.\n");
5127 		return;
5128 	}
5129 
5130 	/*
5131 	 * FIXME: This needs proper synchronization with psr state for some
5132 	 * platforms that cannot have PSR and DRRS enabled at the same time.
5133 	 */
5134 
5135 	dig_port = dp_to_dig_port(intel_dp);
5136 	encoder = &dig_port->base;
5137 	intel_crtc = to_intel_crtc(encoder->base.crtc);
5138 
5139 	if (!intel_crtc) {
5140 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5141 		return;
5142 	}
5143 
5144 	config = intel_crtc->config;
5145 
5146 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5147 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5148 		return;
5149 	}
5150 
5151 	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5152 			refresh_rate)
5153 		index = DRRS_LOW_RR;
5154 
5155 	if (index == dev_priv->drrs.refresh_rate_type) {
5156 		DRM_DEBUG_KMS(
5157 			"DRRS requested for previously set RR...ignoring\n");
5158 		return;
5159 	}
5160 
5161 	if (!intel_crtc->active) {
5162 		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not active\n");
5163 		return;
5164 	}
5165 
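	/*
	 * Note the two mechanisms below: gen8+ (except CHV) switches RR
	 * by reprogramming the link M/N values (M1_N1 vs M2_N2); gen7
	 * and VLV/CHV instead toggle an EDP_RR_MODE_SWITCH bit in
	 * PIPECONF.
	 */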
5166 	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5167 		switch (index) {
5168 		case DRRS_HIGH_RR:
5169 			intel_dp_set_m_n(intel_crtc, M1_N1);
5170 			break;
5171 		case DRRS_LOW_RR:
5172 			intel_dp_set_m_n(intel_crtc, M2_N2);
5173 			break;
5174 		case DRRS_MAX_RR:
5175 		default:
5176 			DRM_ERROR("Unsupported refresh rate type\n");
5177 		}
5178 	} else if (INTEL_INFO(dev)->gen > 6) {
5179 		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5180 		val = I915_READ(reg);
5181 
5182 		if (index > DRRS_HIGH_RR) {
5183 			if (IS_VALLEYVIEW(dev))
5184 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5185 			else
5186 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5187 		} else {
5188 			if (IS_VALLEYVIEW(dev))
5189 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5190 			else
5191 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5192 		}
5193 		I915_WRITE(reg, val);
5194 	}
5195 
5196 	dev_priv->drrs.refresh_rate_type = index;
5197 
5198 	DRM_DEBUG_KMS("eDP Refresh Rate set to: %dHz\n", refresh_rate);
5199 }
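
/*
 * Illustrative call pattern (hypothetical caller, not in this file):
 * per the kernel-doc above, dev_priv->drrs.mutex must be held across
 * the call, e.g. when user space requests a media refresh rate:
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev, requested_vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */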
5200 
5201 /**
5202  * intel_edp_drrs_enable - init drrs struct if supported
5203  * @intel_dp: DP struct
5204  *
5205  * Initializes frontbuffer_bits and drrs.dp
5206  */
5207 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5208 {
5209 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5210 	struct drm_i915_private *dev_priv = dev->dev_private;
5211 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5212 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5213 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5214 
5215 	if (!intel_crtc->config->has_drrs) {
5216 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5217 		return;
5218 	}
5219 
5220 	mutex_lock(&dev_priv->drrs.mutex);
5221 	if (WARN_ON(dev_priv->drrs.dp)) {
5222 		DRM_ERROR("DRRS already enabled\n");
5223 		goto unlock;
5224 	}
5225 
5226 	dev_priv->drrs.busy_frontbuffer_bits = 0;
5227 
5228 	dev_priv->drrs.dp = intel_dp;
5229 
5230 unlock:
5231 	mutex_unlock(&dev_priv->drrs.mutex);
5232 }
5233 
5234 /**
5235  * intel_edp_drrs_disable - Disable DRRS
5236  * @intel_dp: DP struct
5237  *
5238  */
5239 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5240 {
5241 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5242 	struct drm_i915_private *dev_priv = dev->dev_private;
5243 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5244 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5245 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5246 
5247 	if (!intel_crtc->config->has_drrs)
5248 		return;
5249 
5250 	mutex_lock(&dev_priv->drrs.mutex);
5251 	if (!dev_priv->drrs.dp) {
5252 		mutex_unlock(&dev_priv->drrs.mutex);
5253 		return;
5254 	}
5255 
5256 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5257 		intel_dp_set_drrs_state(dev_priv->dev,
5258 			intel_dp->attached_connector->panel.
5259 			fixed_mode->vrefresh);
5260 
5261 	dev_priv->drrs.dp = NULL;
5262 	mutex_unlock(&dev_priv->drrs.mutex);
5263 
5264 	cancel_delayed_work_sync(&dev_priv->drrs.work);
5265 }
5266 
5267 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5268 {
5269 	struct drm_i915_private *dev_priv =
5270 		container_of(work, typeof(*dev_priv), drrs.work.work);
5271 	struct intel_dp *intel_dp;
5272 
5273 	mutex_lock(&dev_priv->drrs.mutex);
5274 
5275 	intel_dp = dev_priv->drrs.dp;
5276 
5277 	if (!intel_dp)
5278 		goto unlock;
5279 
5280 	/*
5281 	 * The delayed work can race with an invalidate, hence we need to
5282 	 * recheck.
5283 	 */
5284 
5285 	if (dev_priv->drrs.busy_frontbuffer_bits)
5286 		goto unlock;
5287 
5288 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5289 		intel_dp_set_drrs_state(dev_priv->dev,
5290 			intel_dp->attached_connector->panel.
5291 			downclock_mode->vrefresh);
5292 
5293 unlock:
5294 	mutex_unlock(&dev_priv->drrs.mutex);
5295 }
5296 
5297 /**
5298  * intel_edp_drrs_invalidate - Invalidate DRRS
5299  * @dev: DRM device
5300  * @frontbuffer_bits: frontbuffer plane tracking bits
5301  *
5302  * When there is a disturbance on screen (due to cursor movement, time
5303  * updates, etc.), DRRS needs to be invalidated, i.e. we need to switch
5304  * to high RR.
5305  *
5306  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5307  */
5308 void intel_edp_drrs_invalidate(struct drm_device *dev,
5309 		unsigned frontbuffer_bits)
5310 {
5311 	struct drm_i915_private *dev_priv = dev->dev_private;
5312 	struct drm_crtc *crtc;
5313 	enum i915_pipe pipe;
5314 
5315 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5316 		return;
5317 
5318 	cancel_delayed_work(&dev_priv->drrs.work);
5319 
5320 	mutex_lock(&dev_priv->drrs.mutex);
5321 	if (!dev_priv->drrs.dp) {
5322 		mutex_unlock(&dev_priv->drrs.mutex);
5323 		return;
5324 	}
5325 
5326 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5327 	pipe = to_intel_crtc(crtc)->pipe;
5328 
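	/*
	 * Any invalidate means screen activity: bounce back to the
	 * panel's fixed (high) refresh rate before recording which
	 * frontbuffers are busy.
	 */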
5329 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
5330 		intel_dp_set_drrs_state(dev_priv->dev,
5331 				dev_priv->drrs.dp->attached_connector->panel.
5332 				fixed_mode->vrefresh);
5333 	}
5334 
5335 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5336 
5337 	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5338 	mutex_unlock(&dev_priv->drrs.mutex);
5339 }
5340 
5341 /**
5342  * intel_edp_drrs_flush - Flush DRRS
5343  * @dev: DRM device
5344  * @frontbuffer_bits: frontbuffer plane tracking bits
5345  *
5346  * When there is no movement on screen, DRRS work can be scheduled.
5347  * This DRRS work is responsible for setting relevant registers after a
5348  * timeout of 1 second.
5349  *
5350  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5351  */
5352 void intel_edp_drrs_flush(struct drm_device *dev,
5353 		unsigned frontbuffer_bits)
5354 {
5355 	struct drm_i915_private *dev_priv = dev->dev_private;
5356 	struct drm_crtc *crtc;
5357 	enum i915_pipe pipe;
5358 
5359 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5360 		return;
5361 
5362 	cancel_delayed_work(&dev_priv->drrs.work);
5363 
5364 	mutex_lock(&dev_priv->drrs.mutex);
5365 	if (!dev_priv->drrs.dp) {
5366 		mutex_unlock(&dev_priv->drrs.mutex);
5367 		return;
5368 	}
5369 
5370 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5371 	pipe = to_intel_crtc(crtc)->pipe;
5372 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5373 
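	/*
	 * Re-arm the 1 second idleness timer only when no tracked
	 * frontbuffer is still busy and we are not already downclocked.
	 */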
5374 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5375 			!dev_priv->drrs.busy_frontbuffer_bits)
5376 		schedule_delayed_work(&dev_priv->drrs.work,
5377 				msecs_to_jiffies(1000));
5378 	mutex_unlock(&dev_priv->drrs.mutex);
5379 }
5380 
5381 /**
5382  * DOC: Display Refresh Rate Switching (DRRS)
5383  *
5384  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5385  * which enables switching between low and high refresh rates,
5386  * dynamically, based on the usage scenario. This feature is applicable
5387  * for internal panels.
5388  *
5389  * Indication that the panel supports DRRS is given by the panel EDID, which
5390  * would list multiple refresh rates for one resolution.
5391  *
5392  * DRRS is of 2 types - static and seamless.
5393  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5394  * (may appear as a blink on screen) and is used in dock/undock scenarios.
5395  * Seamless DRRS involves changing RR without any visual effect to the user
5396  * and can be used during normal system usage. This is done by programming
5397  * certain registers.
5398  *
5399  * Support for static/seamless DRRS may be indicated in the VBT based on
5400  * inputs from the panel spec.
5401  *
5402  * DRRS saves power by switching to low RR based on usage scenarios.
5403  *
5404  * eDP DRRS:
5405  *        The implementation is based on frontbuffer tracking.
5406  * When there is a disturbance on the screen triggered by user activity or a
5407  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5408  * When there is no movement on screen, after a timeout of 1 second, a switch
5409  * to low RR is made.
5410  *        For integration with frontbuffer tracking code,
5411  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5412  *
5413  * DRRS can be further extended to support other internal panels and also
5414  * the scenario of video playback wherein RR is set based on the rate
5415  * requested by userspace.
5416  */
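
/*
 * A minimal sketch of the integration described above (illustrative
 * only; the real hooks live in the frontbuffer tracking code):
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	... frontbuffer is written, panel stays at high RR ...
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 *	... after 1s of idleness the work item drops to low RR ...
 */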
5417 
5418 /**
5419  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5420  * @intel_connector: eDP connector
5421  * @fixed_mode: preferred mode of panel
5422  *
5423  * This function is called only once at driver load to initialize basic
5424  * DRRS state (the delayed work and mutex).
5425  *
5426  * Returns:
5427  * Downclock mode if the panel supports it, else NULL.
5428  * DRRS support is determined by the presence of downclock mode (apart
5429  * from VBT setting).
5430  */
5431 static struct drm_display_mode *
5432 intel_dp_drrs_init(struct intel_connector *intel_connector,
5433 		struct drm_display_mode *fixed_mode)
5434 {
5435 	struct drm_connector *connector = &intel_connector->base;
5436 	struct drm_device *dev = connector->dev;
5437 	struct drm_i915_private *dev_priv = dev->dev_private;
5438 	struct drm_display_mode *downclock_mode = NULL;
5439 
5440 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5441 	lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5442 
5443 	if (INTEL_INFO(dev)->gen <= 6) {
5444 		DRM_DEBUG_KMS("DRRS supported only for Gen7 and above\n");
5445 		return NULL;
5446 	}
5447 
5448 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5449 		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5450 		return NULL;
5451 	}
5452 
5453 	downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
5454 						    connector);
5455 
5456 	if (!downclock_mode) {
5457 		DRM_DEBUG_KMS("No downclock mode found. DRRS not supported\n");
5458 		return NULL;
5459 	}
5460 
5461 	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5462 
5463 	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5464 	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5465 	return downclock_mode;
5466 }
5467 
5468 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5469 				     struct intel_connector *intel_connector)
5470 {
5471 	struct drm_connector *connector = &intel_connector->base;
5472 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5473 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5474 	struct drm_device *dev = intel_encoder->base.dev;
5475 	struct drm_i915_private *dev_priv = dev->dev_private;
5476 	struct drm_display_mode *fixed_mode = NULL;
5477 	struct drm_display_mode *downclock_mode = NULL;
5478 	bool has_dpcd;
5479 	struct drm_display_mode *scan;
5480 	struct edid *edid;
5481 	enum i915_pipe pipe = INVALID_PIPE;
5482 
5483 	if (!is_edp(intel_dp))
5484 		return true;
5485 
5486 	pps_lock(intel_dp);
5487 	intel_edp_panel_vdd_sanitize(intel_dp);
5488 	pps_unlock(intel_dp);
5489 
5490 	/* Cache DPCD and EDID for edp. */
5491 	has_dpcd = intel_dp_get_dpcd(intel_dp);
5492 
5493 	if (has_dpcd) {
5494 		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5495 			dev_priv->no_aux_handshake =
5496 				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5497 				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5498 	} else {
5499 		/* if this fails, presume the device is a ghost */
5500 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
5501 		return false;
5502 	}
5503 
5504 	/* We now know it's not a ghost, init power sequence regs. */
5505 	pps_lock(intel_dp);
5506 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5507 	pps_unlock(intel_dp);
5508 
5509 	mutex_lock(&dev->mode_config.mutex);
5510 	edid = drm_get_edid(connector, intel_dp->aux.ddc);
5511 	if (edid) {
5512 		if (drm_add_edid_modes(connector, edid)) {
5513 			drm_mode_connector_update_edid_property(connector,
5514 								edid);
5515 			drm_edid_to_eld(connector, edid);
5516 		} else {
5517 			kfree(edid);
5518 			edid = ERR_PTR(-EINVAL);
5519 		}
5520 	} else {
5521 		edid = ERR_PTR(-ENOENT);
5522 	}
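	/*
	 * Cache the result; failures are stored as ERR_PTR() sentinels
	 * so later users can tell a bad EDID (-EINVAL) from an absent
	 * one (-ENOENT).
	 */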
5523 	intel_connector->edid = edid;
5524 
5525 	/* prefer fixed mode from EDID if available */
5526 	list_for_each_entry(scan, &connector->probed_modes, head) {
5527 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5528 			fixed_mode = drm_mode_duplicate(dev, scan);
5529 			downclock_mode = intel_dp_drrs_init(
5530 						intel_connector, fixed_mode);
5531 			break;
5532 		}
5533 	}
5534 
5535 	/* fallback to VBT if available for eDP */
5536 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5537 		fixed_mode = drm_mode_duplicate(dev,
5538 					dev_priv->vbt.lfp_lvds_vbt_mode);
5539 		if (fixed_mode)
5540 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5541 	}
5542 	mutex_unlock(&dev->mode_config.mutex);
5543 
5544 	if (IS_VALLEYVIEW(dev)) {
5545 #if 0
5546 		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5547 		register_reboot_notifier(&intel_dp->edp_notifier);
5548 #endif
5549 
5550 		/*
5551 		 * Figure out the current pipe for the initial backlight setup.
5552 		 * If the current pipe isn't valid, try the PPS pipe, and if that
5553 		 * fails just assume pipe A.
5554 		 */
5555 		if (IS_CHERRYVIEW(dev))
5556 			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5557 		else
5558 			pipe = PORT_TO_PIPE(intel_dp->DP);
5559 
5560 		if (pipe != PIPE_A && pipe != PIPE_B)
5561 			pipe = intel_dp->pps_pipe;
5562 
5563 		if (pipe != PIPE_A && pipe != PIPE_B)
5564 			pipe = PIPE_A;
5565 
5566 		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5567 			      pipe_name(pipe));
5568 	}
5569 
5570 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5571 	intel_connector->panel.backlight_power = intel_edp_backlight_power;
5572 	intel_panel_setup_backlight(connector, pipe);
5573 
5574 	return true;
5575 }
5576 
5577 bool
5578 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5579 			struct intel_connector *intel_connector)
5580 {
5581 	struct drm_connector *connector = &intel_connector->base;
5582 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5583 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5584 	struct drm_device *dev = intel_encoder->base.dev;
5585 	struct drm_i915_private *dev_priv = dev->dev_private;
5586 	enum port port = intel_dig_port->port;
5587 	int type;
5588 
5589 	intel_dp->pps_pipe = INVALID_PIPE;
5590 
5591 	/* intel_dp vfuncs */
5592 	if (INTEL_INFO(dev)->gen >= 9)
5593 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5594 	else if (IS_VALLEYVIEW(dev))
5595 		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5596 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5597 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5598 	else if (HAS_PCH_SPLIT(dev))
5599 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5600 	else
5601 		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5602 
5603 	if (INTEL_INFO(dev)->gen >= 9)
5604 		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5605 	else
5606 		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5607 
5608 	/* Preserve the current hw state. */
5609 	intel_dp->DP = I915_READ(intel_dp->output_reg);
5610 	intel_dp->attached_connector = intel_connector;
5611 
5612 	if (intel_dp_is_edp(dev, port))
5613 		type = DRM_MODE_CONNECTOR_eDP;
5614 	else
5615 		type = DRM_MODE_CONNECTOR_DisplayPort;
5616 
5617 	/*
5618 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5619 	 * for DP the encoder type can be set by the caller to
5620 	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5621 	 */
5622 	if (type == DRM_MODE_CONNECTOR_eDP)
5623 		intel_encoder->type = INTEL_OUTPUT_EDP;
5624 
5625 	/* eDP only on port B and/or C on vlv/chv */
5626 	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5627 		    port != PORT_B && port != PORT_C))
5628 		return false;
5629 
5630 	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5631 			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5632 			port_name(port));
5633 
5634 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5635 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5636 
5637 	connector->interlace_allowed = true;
5638 	connector->doublescan_allowed = 0;
5639 
5640 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5641 			  edp_panel_vdd_work);
5642 
5643 	intel_connector_attach_encoder(intel_connector, intel_encoder);
5644 	drm_connector_register(connector);
5645 
5646 	if (HAS_DDI(dev))
5647 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5648 	else
5649 		intel_connector->get_hw_state = intel_connector_get_hw_state;
5650 	intel_connector->unregister = intel_dp_connector_unregister;
5651 
5652 	/* Set up the hotplug pin. */
5653 	switch (port) {
5654 	case PORT_A:
5655 		intel_encoder->hpd_pin = HPD_PORT_A;
5656 		break;
5657 	case PORT_B:
5658 		intel_encoder->hpd_pin = HPD_PORT_B;
5659 		break;
5660 	case PORT_C:
5661 		intel_encoder->hpd_pin = HPD_PORT_C;
5662 		break;
5663 	case PORT_D:
5664 		intel_encoder->hpd_pin = HPD_PORT_D;
5665 		break;
5666 	default:
5667 		BUG();
5668 	}
5669 
5670 	if (is_edp(intel_dp)) {
5671 		pps_lock(intel_dp);
5672 		intel_dp_init_panel_power_timestamps(intel_dp);
5673 		if (IS_VALLEYVIEW(dev))
5674 			vlv_initial_power_sequencer_setup(intel_dp);
5675 		else
5676 			intel_dp_init_panel_power_sequencer(dev, intel_dp);
5677 		pps_unlock(intel_dp);
5678 	}
5679 
5680 	intel_dp_aux_init(intel_dp, intel_connector);
5681 
5682 	/* init MST on ports that can support it */
5683 	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
5684 		if (port == PORT_B || port == PORT_C || port == PORT_D) {
5685 			intel_dp_mst_encoder_init(intel_dig_port,
5686 						  intel_connector->base.base.id);
5687 		}
5688 	}
5689 
5690 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5691 		drm_dp_aux_unregister(&intel_dp->aux);
5692 		if (is_edp(intel_dp)) {
5693 			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5694 			/*
5695 			 * vdd might still be enabled due to the delayed vdd off.
5696 			 * Make sure vdd is actually turned off here.
5697 			 */
5698 			pps_lock(intel_dp);
5699 			edp_panel_vdd_off_sync(intel_dp);
5700 			pps_unlock(intel_dp);
5701 		}
5702 		drm_connector_unregister(connector);
5703 		drm_connector_cleanup(connector);
5704 		return false;
5705 	}
5706 
5707 	intel_dp_add_properties(intel_dp, connector);
5708 
5709 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5710 	 * 0xd.  Failure to do so will result in spurious interrupts being
5711 	 * generated on the port when a cable is not attached.
5712 	 */
5713 	if (IS_G4X(dev) && !IS_GM45(dev)) {
5714 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5715 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5716 	}
5717 
5718 	return true;
5719 }
5720 
5721 void
5722 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5723 {
5724 	struct drm_i915_private *dev_priv = dev->dev_private;
5725 	struct intel_digital_port *intel_dig_port;
5726 	struct intel_encoder *intel_encoder;
5727 	struct drm_encoder *encoder;
5728 	struct intel_connector *intel_connector;
5729 
5730 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5731 	if (!intel_dig_port)
5732 		return;
5733 
5734 	intel_connector = intel_connector_alloc();
5735 	if (!intel_connector) {
5736 		kfree(intel_dig_port);
5737 		return;
5738 	}
5739 
5740 	intel_encoder = &intel_dig_port->base;
5741 	encoder = &intel_encoder->base;
5742 
5743 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5744 			 DRM_MODE_ENCODER_TMDS);
5745 
5746 	intel_encoder->compute_config = intel_dp_compute_config;
5747 	intel_encoder->disable = intel_disable_dp;
5748 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
5749 	intel_encoder->get_config = intel_dp_get_config;
5750 	intel_encoder->suspend = intel_dp_encoder_suspend;
5751 	if (IS_CHERRYVIEW(dev)) {
5752 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
5753 		intel_encoder->pre_enable = chv_pre_enable_dp;
5754 		intel_encoder->enable = vlv_enable_dp;
5755 		intel_encoder->post_disable = chv_post_disable_dp;
5756 	} else if (IS_VALLEYVIEW(dev)) {
5757 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
5758 		intel_encoder->pre_enable = vlv_pre_enable_dp;
5759 		intel_encoder->enable = vlv_enable_dp;
5760 		intel_encoder->post_disable = vlv_post_disable_dp;
5761 	} else {
5762 		intel_encoder->pre_enable = g4x_pre_enable_dp;
5763 		intel_encoder->enable = g4x_enable_dp;
5764 		if (INTEL_INFO(dev)->gen >= 5)
5765 			intel_encoder->post_disable = ilk_post_disable_dp;
5766 	}
5767 
5768 	intel_dig_port->port = port;
5769 	intel_dig_port->dp.output_reg = output_reg;
5770 
5771 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5772 	if (IS_CHERRYVIEW(dev)) {
5773 		if (port == PORT_D)
5774 			intel_encoder->crtc_mask = 1 << 2;
5775 		else
5776 			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5777 	} else {
5778 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5779 	}
5780 	intel_encoder->cloneable = 0;
5781 	intel_encoder->hot_plug = intel_dp_hot_plug;
5782 
5783 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5784 	dev_priv->hpd_irq_port[port] = intel_dig_port;
5785 
5786 	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5787 		drm_encoder_cleanup(encoder);
5788 		kfree(intel_dig_port);
5789 		kfree(intel_connector);
5790 	}
5791 }
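
/*
 * Illustrative usage (hypothetical caller): the mode setting init code
 * is expected to call this once per DP port with the matching output
 * register, e.g. intel_dp_init(dev, DP_B, PORT_B).
 */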
5792 
5793 #if 0
5794 void intel_dp_mst_suspend(struct drm_device *dev)
5795 {
5796 	struct drm_i915_private *dev_priv = dev->dev_private;
5797 	int i;
5798 
5799 	/* disable MST */
5800 	for (i = 0; i < I915_MAX_PORTS; i++) {
5801 		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5802 		if (!intel_dig_port)
5803 			continue;
5804 
5805 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5806 			if (!intel_dig_port->dp.can_mst)
5807 				continue;
5808 			if (intel_dig_port->dp.is_mst)
5809 				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5810 		}
5811 	}
5812 }
5813 #endif
5814 
5815 void intel_dp_mst_resume(struct drm_device *dev)
5816 {
5817 	struct drm_i915_private *dev_priv = dev->dev_private;
5818 	int i;
5819 
5820 	for (i = 0; i < I915_MAX_PORTS; i++) {
5821 		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5822 		if (!intel_dig_port)
5823 			continue;
5824 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5825 #if 0
5826 			int ret;
5827 
5828 			if (!intel_dig_port->dp.can_mst)
5829 				continue;
5830 
5831 			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5832 			if (ret != 0) {
5833 				intel_dp_check_mst_status(&intel_dig_port->dp);
5834 			}
5835 #endif
5836 		}
5837 	}
5838 }
5839