xref: /dragonfly/sys/dev/drm/i915/intel_dp.c (revision cb740add)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27 
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <linux/notifier.h>
31 #include <drm/drmP.h>
32 #include <linux/slab.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_crtc.h>
35 #include <drm/drm_crtc_helper.h>
36 #include <drm/drm_edid.h>
37 #include "intel_drv.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 
41 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
42 
43 static int disable_aux_irq = 0;
44 TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
45 
46 /* Compliance test status bits  */
47 #define INTEL_DP_RESOLUTION_SHIFT_MASK	0
48 #define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 #define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
51 
52 struct dp_link_dpll {
53 	int clock;
54 	struct dpll dpll;
55 };
56 
57 static const struct dp_link_dpll gen4_dpll[] = {
58 	{ 162000,
59 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
60 	{ 270000,
61 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
62 };
63 
64 static const struct dp_link_dpll pch_dpll[] = {
65 	{ 162000,
66 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
67 	{ 270000,
68 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
69 };
70 
71 static const struct dp_link_dpll vlv_dpll[] = {
72 	{ 162000,
73 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
74 	{ 270000,
75 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
76 };
77 
78 /*
79  * CHV supports eDP 1.4, which allows additional link rates.
80  * Only the fixed rates are provided below; the variable rates are excluded.
81  */
82 static const struct dp_link_dpll chv_dpll[] = {
83 	/*
84 	 * CHV requires programming a fractional divider for m2.
85 	 * m2 is stored in fixed-point format using the formula below:
86 	 * (m2_int << 22) | m2_fraction
87 	 */
88 	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
89 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
90 	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
91 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
92 	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
93 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
94 };
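/*
 * Worked example of the fixed-point encoding above (illustration only,
 * not driver code): for the 162000 kHz entry, m2_int = 32 and
 * m2_fraction = 1677722, so
 *
 *     (32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a
 *
 * which is exactly the .m2 value stored in the table.
 */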
95 
96 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
97 				  324000, 432000, 540000 };
98 static const int skl_rates[] = { 162000, 216000, 270000,
99 				  324000, 432000, 540000 };
100 static const int default_rates[] = { 162000, 270000, 540000 };
101 
102 /**
103  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104  * @intel_dp: DP struct
105  *
106  * If a CPU or PCH DP output is attached to an eDP panel, this function
107  * returns true; otherwise it returns false.
108  */
109 static bool is_edp(struct intel_dp *intel_dp)
110 {
111 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112 
113 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
114 }
115 
116 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
117 {
118 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
119 
120 	return intel_dig_port->base.base.dev;
121 }
122 
123 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
124 {
125 	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
126 }
127 
128 static void intel_dp_link_down(struct intel_dp *intel_dp);
129 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
130 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
131 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
132 static void vlv_steal_power_sequencer(struct drm_device *dev,
133 				      enum i915_pipe pipe);
134 
135 static unsigned int intel_dp_unused_lane_mask(int lane_count)
136 {
137 	return ~((1 << lane_count) - 1) & 0xf;
138 }
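/*
 * Example (illustration only): for lane_count == 2 the used lanes are
 * 0b0011, so the unused lane mask is ~0b0011 & 0xf == 0b1100 (0xc).
 */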
139 
140 static int
141 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
142 {
143 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
144 
145 	switch (max_link_bw) {
146 	case DP_LINK_BW_1_62:
147 	case DP_LINK_BW_2_7:
148 	case DP_LINK_BW_5_4:
149 		break;
150 	default:
151 		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
152 		     max_link_bw);
153 		max_link_bw = DP_LINK_BW_1_62;
154 		break;
155 	}
156 	return max_link_bw;
157 }
158 
159 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
160 {
161 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
162 	u8 source_max, sink_max;
163 
164 	source_max = intel_dig_port->max_lanes;
165 	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
166 
167 	return min(source_max, sink_max);
168 }
169 
170 /*
171  * The units on the numbers in the next two are... bizarre.  Examples will
172  * make it clearer; this one parallels an example in the eDP spec.
173  *
174  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
175  *
176  *     270000 * 1 * 8 / 10 == 216000
177  *
178  * The actual data capacity of that configuration is 2.16Gbit/s, so the
179  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
180  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
181  * 119000.  At 18bpp that's 2142000 kilobits per second.
182  *
183  * Thus the strange-looking division by 10 in intel_dp_link_required, to
184  * get the result in decakilobits instead of kilobits.
185  */
186 
187 static int
188 intel_dp_link_required(int pixel_clock, int bpp)
189 {
190 	return (pixel_clock * bpp + 9) / 10;
191 }
192 
193 static int
194 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
195 {
196 	return (max_link_clock * max_lanes * 8) / 10;
197 }
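#if 0
/*
 * Illustration only (a sketch, not driver code): completing the example
 * from the comment above for 1680x1050R at a 119000 kHz dotclock and 18bpp.
 */
static void intel_dp_rate_example(void)
{
	int mode_rate = intel_dp_link_required(119000, 18);	/* 214200 */
	int max_rate = intel_dp_max_data_rate(270000, 1);	/* 216000 */

	/* 214200 <= 216000, so the mode fits on one 2.7GHz lane */
	WARN_ON(mode_rate > max_rate);
}
#endif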
198 
199 static enum drm_mode_status
200 intel_dp_mode_valid(struct drm_connector *connector,
201 		    struct drm_display_mode *mode)
202 {
203 	struct intel_dp *intel_dp = intel_attached_dp(connector);
204 	struct intel_connector *intel_connector = to_intel_connector(connector);
205 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
206 	int target_clock = mode->clock;
207 	int max_rate, mode_rate, max_lanes, max_link_clock;
208 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
209 
210 	if (is_edp(intel_dp) && fixed_mode) {
211 		if (mode->hdisplay > fixed_mode->hdisplay)
212 			return MODE_PANEL;
213 
214 		if (mode->vdisplay > fixed_mode->vdisplay)
215 			return MODE_PANEL;
216 
217 		target_clock = fixed_mode->clock;
218 	}
219 
220 	max_link_clock = intel_dp_max_link_rate(intel_dp);
221 	max_lanes = intel_dp_max_lane_count(intel_dp);
222 
223 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
224 	mode_rate = intel_dp_link_required(target_clock, 18);
225 
226 	if (mode_rate > max_rate || target_clock > max_dotclk)
227 		return MODE_CLOCK_HIGH;
228 
229 	if (mode->clock < 10000)
230 		return MODE_CLOCK_LOW;
231 
232 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
233 		return MODE_H_ILLEGAL;
234 
235 	return MODE_OK;
236 }
237 
238 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
239 {
240 	int	i;
241 	uint32_t v = 0;
242 
243 	if (src_bytes > 4)
244 		src_bytes = 4;
245 	for (i = 0; i < src_bytes; i++)
246 		v |= ((uint32_t) src[i]) << ((3-i) * 8);
247 	return v;
248 }
249 
250 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
251 {
252 	int i;
253 	if (dst_bytes > 4)
254 		dst_bytes = 4;
255 	for (i = 0; i < dst_bytes; i++)
256 		dst[i] = src >> ((3-i) * 8);
257 }
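#if 0
/*
 * Illustration only (a sketch, not driver code): pack/unpack round-trip.
 * Packing is MSB-first, so { 0x12, 0x34, 0x56 } becomes 0x12345600, and
 * unpacking that value recovers the original three bytes.
 */
static void intel_dp_pack_example(void)
{
	uint8_t bytes[3] = { 0x12, 0x34, 0x56 };
	uint8_t out[3];
	uint32_t v = intel_dp_pack_aux(bytes, 3);	/* 0x12345600 */

	intel_dp_unpack_aux(v, out, 3);		/* { 0x12, 0x34, 0x56 } */
}
#endif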
258 
259 static void
260 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
261 				    struct intel_dp *intel_dp);
262 static void
263 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
264 					      struct intel_dp *intel_dp);
265 
266 static void pps_lock(struct intel_dp *intel_dp)
267 {
268 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
269 	struct intel_encoder *encoder = &intel_dig_port->base;
270 	struct drm_device *dev = encoder->base.dev;
271 	struct drm_i915_private *dev_priv = dev->dev_private;
272 	enum intel_display_power_domain power_domain;
273 
274 	/*
275 	 * See vlv_power_sequencer_reset() for why we need
276 	 * a power domain reference here.
277 	 */
278 	power_domain = intel_display_port_aux_power_domain(encoder);
279 	intel_display_power_get(dev_priv, power_domain);
280 
281 	mutex_lock(&dev_priv->pps_mutex);
282 }
283 
284 static void pps_unlock(struct intel_dp *intel_dp)
285 {
286 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
287 	struct intel_encoder *encoder = &intel_dig_port->base;
288 	struct drm_device *dev = encoder->base.dev;
289 	struct drm_i915_private *dev_priv = dev->dev_private;
290 	enum intel_display_power_domain power_domain;
291 
292 	mutex_unlock(&dev_priv->pps_mutex);
293 
294 	power_domain = intel_display_port_aux_power_domain(encoder);
295 	intel_display_power_put(dev_priv, power_domain);
296 }
297 
298 static void
299 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
300 {
301 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
302 	struct drm_device *dev = intel_dig_port->base.base.dev;
303 	struct drm_i915_private *dev_priv = dev->dev_private;
304 	enum i915_pipe pipe = intel_dp->pps_pipe;
305 	bool pll_enabled, release_cl_override = false;
306 	enum dpio_phy phy = DPIO_PHY(pipe);
307 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
308 	uint32_t DP;
309 
310 	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
311 		 "skipping pipe %c power sequencer kick due to port %c being active\n",
312 		 pipe_name(pipe), port_name(intel_dig_port->port)))
313 		return;
314 
315 	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
316 		      pipe_name(pipe), port_name(intel_dig_port->port));
317 
318 	/* Preserve the BIOS-computed detected bit. This is
319 	 * supposed to be read-only.
320 	 */
321 	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
322 	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
323 	DP |= DP_PORT_WIDTH(1);
324 	DP |= DP_LINK_TRAIN_PAT_1;
325 
326 	if (IS_CHERRYVIEW(dev))
327 		DP |= DP_PIPE_SELECT_CHV(pipe);
328 	else if (pipe == PIPE_B)
329 		DP |= DP_PIPEB_SELECT;
330 
331 	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
332 
333 	/*
334 	 * The DPLL for the pipe must be enabled for this to work.
335 	 * So enable it temporarily if it's not already enabled.
336 	 */
337 	if (!pll_enabled) {
338 		release_cl_override = IS_CHERRYVIEW(dev) &&
339 			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
340 
341 		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
342 				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
343 			DRM_ERROR("Failed to force on pll for pipe %c!\n",
344 				  pipe_name(pipe));
345 			return;
346 		}
347 	}
348 
349 	/*
350 	 * Similar magic as in intel_dp_enable_port().
351 	 * We _must_ do this port enable + disable trick
352 	 * to make this power sequencer lock onto the port.
353 	 * Otherwise even VDD force bit won't work.
354 	 */
355 	I915_WRITE(intel_dp->output_reg, DP);
356 	POSTING_READ(intel_dp->output_reg);
357 
358 	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
359 	POSTING_READ(intel_dp->output_reg);
360 
361 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
362 	POSTING_READ(intel_dp->output_reg);
363 
364 	if (!pll_enabled) {
365 		vlv_force_pll_off(dev, pipe);
366 
367 		if (release_cl_override)
368 			chv_phy_powergate_ch(dev_priv, phy, ch, false);
369 	}
370 }
371 
372 static enum i915_pipe
373 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
374 {
375 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
376 	struct drm_device *dev = intel_dig_port->base.base.dev;
377 	struct drm_i915_private *dev_priv = dev->dev_private;
378 	struct intel_encoder *encoder;
379 	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
380 	enum i915_pipe pipe;
381 
382 	lockdep_assert_held(&dev_priv->pps_mutex);
383 
384 	/* We should never land here with regular DP ports */
385 	WARN_ON(!is_edp(intel_dp));
386 
387 	if (intel_dp->pps_pipe != INVALID_PIPE)
388 		return intel_dp->pps_pipe;
389 
390 	/*
391 	 * We don't have a power sequencer currently.
392 	 * Pick one that's not used by other ports.
393 	 */
394 	for_each_intel_encoder(dev, encoder) {
395 		struct intel_dp *tmp;
396 
397 		if (encoder->type != INTEL_OUTPUT_EDP)
398 			continue;
399 
400 		tmp = enc_to_intel_dp(&encoder->base);
401 
402 		if (tmp->pps_pipe != INVALID_PIPE)
403 			pipes &= ~(1 << tmp->pps_pipe);
404 	}
405 
406 	/*
407 	 * Didn't find one. This should not happen since there
408 	 * are two power sequencers and up to two eDP ports.
409 	 */
410 	if (WARN_ON(pipes == 0))
411 		pipe = PIPE_A;
412 	else
413 		pipe = ffs(pipes) - 1;
414 
415 	vlv_steal_power_sequencer(dev, pipe);
416 	intel_dp->pps_pipe = pipe;
417 
418 	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
419 		      pipe_name(intel_dp->pps_pipe),
420 		      port_name(intel_dig_port->port));
421 
422 	/* init power sequencer on this pipe and port */
423 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
424 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
425 
426 	/*
427 	 * Even vdd force doesn't work until we've made
428 	 * the power sequencer lock onto the port.
429 	 */
430 	vlv_power_sequencer_kick(intel_dp);
431 
432 	return intel_dp->pps_pipe;
433 }
434 
435 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
436 			       enum i915_pipe pipe);
437 
438 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
439 			       enum i915_pipe pipe)
440 {
441 	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
442 }
443 
444 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
445 				enum i915_pipe pipe)
446 {
447 	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
448 }
449 
450 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
451 			 enum i915_pipe pipe)
452 {
453 	return true;
454 }
455 
456 static enum i915_pipe
457 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
458 		     enum port port,
459 		     vlv_pipe_check pipe_check)
460 {
461 	enum i915_pipe pipe;
462 
463 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
464 		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
465 			PANEL_PORT_SELECT_MASK;
466 
467 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
468 			continue;
469 
470 		if (!pipe_check(dev_priv, pipe))
471 			continue;
472 
473 		return pipe;
474 	}
475 
476 	return INVALID_PIPE;
477 }
478 
479 static void
480 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
481 {
482 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
483 	struct drm_device *dev = intel_dig_port->base.base.dev;
484 	struct drm_i915_private *dev_priv = dev->dev_private;
485 	enum port port = intel_dig_port->port;
486 
487 	lockdep_assert_held(&dev_priv->pps_mutex);
488 
489 	/* try to find a pipe with this port selected */
490 	/* first pick one where the panel is on */
491 	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
492 						  vlv_pipe_has_pp_on);
493 	/* didn't find one? pick one where vdd is on */
494 	if (intel_dp->pps_pipe == INVALID_PIPE)
495 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
496 							  vlv_pipe_has_vdd_on);
497 	/* didn't find one? pick one with just the correct port */
498 	if (intel_dp->pps_pipe == INVALID_PIPE)
499 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
500 							  vlv_pipe_any);
501 
502 	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
503 	if (intel_dp->pps_pipe == INVALID_PIPE) {
504 		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
505 			      port_name(port));
506 		return;
507 	}
508 
509 	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
510 		      port_name(port), pipe_name(intel_dp->pps_pipe));
511 
512 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
513 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
514 }
515 
516 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
517 {
518 	struct drm_device *dev = dev_priv->dev;
519 	struct intel_encoder *encoder;
520 
521 	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
522 		return;
523 
524 	/*
525 	 * We can't grab pps_mutex here due to deadlock with power_domain
526 	 * mutex when power_domain functions are called while holding pps_mutex.
527 	 * That also means that in order to use pps_pipe the code needs to
528 	 * hold both a power domain reference and pps_mutex, and the power domain
529 	 * reference get/put must be done while _not_ holding pps_mutex.
530 	 * pps_{lock,unlock}() do these steps in the correct order, so they
531 	 * should always be used.
532 	 */
533 
534 	for_each_intel_encoder(dev, encoder) {
535 		struct intel_dp *intel_dp;
536 
537 		if (encoder->type != INTEL_OUTPUT_EDP)
538 			continue;
539 
540 		intel_dp = enc_to_intel_dp(&encoder->base);
541 		intel_dp->pps_pipe = INVALID_PIPE;
542 	}
543 }
544 
545 static i915_reg_t
546 _pp_ctrl_reg(struct intel_dp *intel_dp)
547 {
548 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
549 
550 	if (IS_BROXTON(dev))
551 		return BXT_PP_CONTROL(0);
552 	else if (HAS_PCH_SPLIT(dev))
553 		return PCH_PP_CONTROL;
554 	else
555 		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
556 }
557 
558 static i915_reg_t
559 _pp_stat_reg(struct intel_dp *intel_dp)
560 {
561 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
562 
563 	if (IS_BROXTON(dev))
564 		return BXT_PP_STATUS(0);
565 	else if (HAS_PCH_SPLIT(dev))
566 		return PCH_PP_STATUS;
567 	else
568 		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
569 }
570 
571 /* Reboot notifier handler to shut down panel power and guarantee T12 timing.
572    This function is only applicable when the panel PM state is not being tracked. */
573 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
574 			      void *unused)
575 {
576 	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
577 						 edp_notifier);
578 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
579 	struct drm_i915_private *dev_priv = dev->dev_private;
580 
581 #if 0
582 	if (!is_edp(intel_dp) || code != SYS_RESTART)
583 		return 0;
584 #endif
585 
586 	pps_lock(intel_dp);
587 
588 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
589 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
590 		i915_reg_t pp_ctrl_reg, pp_div_reg;
591 		u32 pp_div;
592 
593 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
594 		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
595 		pp_div = I915_READ(pp_div_reg);
596 		pp_div &= PP_REFERENCE_DIVIDER_MASK;
597 
598 		/* Writing 0x1F to PP_DIV_REG sets the max cycle delay */
599 		I915_WRITE(pp_div_reg, pp_div | 0x1F);
600 		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
601 		msleep(intel_dp->panel_power_cycle_delay);
602 	}
603 
604 	pps_unlock(intel_dp);
605 
606 	return 0;
607 }
608 
609 static bool edp_have_panel_power(struct intel_dp *intel_dp)
610 {
611 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
612 	struct drm_i915_private *dev_priv = dev->dev_private;
613 
614 	lockdep_assert_held(&dev_priv->pps_mutex);
615 
616 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
617 	    intel_dp->pps_pipe == INVALID_PIPE)
618 		return false;
619 
620 	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
621 }
622 
623 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
624 {
625 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
626 	struct drm_i915_private *dev_priv = dev->dev_private;
627 
628 	lockdep_assert_held(&dev_priv->pps_mutex);
629 
630 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
631 	    intel_dp->pps_pipe == INVALID_PIPE)
632 		return false;
633 
634 	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
635 }
636 
637 static void
638 intel_dp_check_edp(struct intel_dp *intel_dp)
639 {
640 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
641 	struct drm_i915_private *dev_priv = dev->dev_private;
642 
643 	if (!is_edp(intel_dp))
644 		return;
645 
646 	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
647 		WARN(1, "eDP powered off while attempting aux channel communication.\n");
648 		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
649 			      I915_READ(_pp_stat_reg(intel_dp)),
650 			      I915_READ(_pp_ctrl_reg(intel_dp)));
651 	}
652 }
653 
654 static uint32_t
655 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
656 {
657 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
658 	struct drm_device *dev = intel_dig_port->base.base.dev;
659 	struct drm_i915_private *dev_priv = dev->dev_private;
660 	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
661 	uint32_t status;
662 	bool done;
663 
664 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
665 	if (has_aux_irq)
666 		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
667 					  msecs_to_jiffies_timeout(10));
668 	else
669 		done = wait_for_atomic(C, 10) == 0;
670 	if (!done)
671 		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
672 			  has_aux_irq);
673 #undef C
674 
675 	return status;
676 }
677 
678 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
679 {
680 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
681 	struct drm_device *dev = intel_dig_port->base.base.dev;
682 
683 	/*
684 	 * The clock divider is based on hrawclk and should run at
685 	 * 2MHz. So, take the hrawclk value, divide by 2 and use that.
686 	 */
687 	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
688 }
689 
690 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
691 {
692 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
693 	struct drm_device *dev = intel_dig_port->base.base.dev;
694 	struct drm_i915_private *dev_priv = dev->dev_private;
695 
696 	if (index)
697 		return 0;
698 
699 	if (intel_dig_port->port == PORT_A) {
700 		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
701 
702 	} else {
703 		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
704 	}
705 }
706 
707 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
708 {
709 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
710 	struct drm_device *dev = intel_dig_port->base.base.dev;
711 	struct drm_i915_private *dev_priv = dev->dev_private;
712 
713 	if (intel_dig_port->port == PORT_A) {
714 		if (index)
715 			return 0;
716 		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
717 	} else if (HAS_PCH_LPT_H(dev_priv)) {
718 		/* Workaround for non-ULT HSW */
719 		switch (index) {
720 		case 0: return 63;
721 		case 1: return 72;
722 		default: return 0;
723 		}
724 	} else  {
725 		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
726 	}
727 }
728 
729 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
730 {
731 	return index ? 0 : 100;
732 }
733 
734 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
735 {
736 	/*
737 	 * SKL doesn't need us to program the AUX clock divider (Hardware will
738 	 * derive the clock from CDCLK automatically). We still implement the
739 	 * get_aux_clock_divider vfunc to plug into the existing code.
740 	 */
741 	return index ? 0 : 1;
742 }
743 
744 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
745 				      bool has_aux_irq,
746 				      int send_bytes,
747 				      uint32_t aux_clock_divider)
748 {
749 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
750 	struct drm_device *dev = intel_dig_port->base.base.dev;
751 	uint32_t precharge, timeout;
752 
753 	if (IS_GEN6(dev))
754 		precharge = 3;
755 	else
756 		precharge = 5;
757 
758 	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
759 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
760 	else
761 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
762 
763 	return DP_AUX_CH_CTL_SEND_BUSY |
764 	       DP_AUX_CH_CTL_DONE |
765 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
766 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
767 	       timeout |
768 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
769 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
770 	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
771 	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
772 }
773 
774 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
775 				      bool has_aux_irq,
776 				      int send_bytes,
777 				      uint32_t unused)
778 {
779 	return DP_AUX_CH_CTL_SEND_BUSY |
780 	       DP_AUX_CH_CTL_DONE |
781 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
782 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
783 	       DP_AUX_CH_CTL_TIME_OUT_1600us |
784 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
785 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
786 	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
787 }
788 
789 static int
790 intel_dp_aux_ch(struct intel_dp *intel_dp,
791 		const uint8_t *send, int send_bytes,
792 		uint8_t *recv, int recv_size)
793 {
794 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
795 	struct drm_device *dev = intel_dig_port->base.base.dev;
796 	struct drm_i915_private *dev_priv = dev->dev_private;
797 	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
798 	uint32_t aux_clock_divider;
799 	int i, ret, recv_bytes;
800 	uint32_t status;
801 	int try, clock = 0;
802 #ifdef __DragonFly__
803 	bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
804 #else
805 	bool has_aux_irq = HAS_AUX_IRQ(dev);
806 #endif
807 	bool vdd;
808 
809 	pps_lock(intel_dp);
810 
811 	/*
812 	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
813 	 * In such cases we want to leave VDD enabled and it's up to the upper
814 	 * layers to turn it off. But for e.g. i2c-dev access we need to turn it
815 	 * on/off ourselves.
816 	 */
817 	vdd = edp_panel_vdd_on(intel_dp);
818 
819 	/* dp aux is extremely sensitive to irq latency, hence request the
820 	 * lowest possible wakeup latency and so prevent the cpu from going into
821 	 * deep sleep states.
822 	 */
823 	pm_qos_update_request(&dev_priv->pm_qos, 0);
824 
825 	intel_dp_check_edp(intel_dp);
826 
827 	/* Try to wait for any previous AUX channel activity */
828 	for (try = 0; try < 3; try++) {
829 		status = I915_READ_NOTRACE(ch_ctl);
830 		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
831 			break;
832 		msleep(1);
833 	}
834 
835 	if (try == 3) {
836 		static u32 last_status = -1;
837 		const u32 status = I915_READ(ch_ctl);
838 
839 		if (status != last_status) {
840 			WARN(1, "dp_aux_ch not started status 0x%08x\n",
841 			     status);
842 			last_status = status;
843 		}
844 
845 		ret = -EBUSY;
846 		goto out;
847 	}
848 
849 	/* Only 5 data registers! */
850 	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
851 		ret = -E2BIG;
852 		goto out;
853 	}
854 
855 	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
856 		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
857 							  has_aux_irq,
858 							  send_bytes,
859 							  aux_clock_divider);
860 
861 		/* Must try at least 3 times according to DP spec */
862 		for (try = 0; try < 5; try++) {
863 			/* Load the send data into the aux channel data registers */
864 			for (i = 0; i < send_bytes; i += 4)
865 				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
866 					   intel_dp_pack_aux(send + i,
867 							     send_bytes - i));
868 
869 			/* Send the command and wait for it to complete */
870 			I915_WRITE(ch_ctl, send_ctl);
871 
872 			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
873 
874 			/* Clear done status and any errors */
875 			I915_WRITE(ch_ctl,
876 				   status |
877 				   DP_AUX_CH_CTL_DONE |
878 				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
879 				   DP_AUX_CH_CTL_RECEIVE_ERROR);
880 
881 			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
882 				continue;
883 
884 			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
885 			 *   400us delay required for errors and timeouts
886 			 *   Timeout errors from the HW already meet this
887 			 *   requirement so skip to next iteration
888 			 */
889 			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
890 				usleep_range(400, 500);
891 				continue;
892 			}
893 			if (status & DP_AUX_CH_CTL_DONE)
894 				goto done;
895 		}
896 	}
897 
898 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
899 		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
900 		ret = -EBUSY;
901 		goto out;
902 	}
903 
904 done:
905 	/* Check for timeout or receive error.
906 	 * Timeouts occur when the sink is not connected
907 	 */
908 	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
909 		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
910 		ret = -EIO;
911 		goto out;
912 	}
913 
914 	/* Timeouts occur when the device isn't connected, so they're
915 	 * "normal" -- don't fill the kernel log with these */
916 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
917 		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
918 		ret = -ETIMEDOUT;
919 		goto out;
920 	}
921 
922 	/* Unload any bytes sent back from the other side */
923 	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
924 		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
925 
926 	/*
927 	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
928 	 * We have no idea what happened, so we return -EBUSY so the
929 	 * drm layer takes care of the necessary retries.
930 	 */
931 	if (recv_bytes == 0 || recv_bytes > 20) {
932 		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
933 			      recv_bytes);
934 		/*
935 		 * FIXME: This patch was created on top of a series that
936 		 * organizes the retries at the drm level. There EBUSY should
937 		 * also take care of the 1ms wait before retrying.
938 		 * That aux retry re-org is still needed, and after it is
939 		 * merged we can remove this sleep from here.
940 		 */
941 		usleep_range(1000, 1500);
942 		ret = -EBUSY;
943 		goto out;
944 	}
945 
946 	if (recv_bytes > recv_size)
947 		recv_bytes = recv_size;
948 
949 	for (i = 0; i < recv_bytes; i += 4)
950 		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
951 				    recv + i, recv_bytes - i);
952 
953 	ret = recv_bytes;
954 out:
955 	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
956 
957 	if (vdd)
958 		edp_panel_vdd_off(intel_dp, false);
959 
960 	pps_unlock(intel_dp);
961 
962 	return ret;
963 }
964 
965 #define BARE_ADDRESS_SIZE	3
966 #define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
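/*
 * Example header (illustration only): a native AUX read of 16 bytes at
 * DPCD address 0x00000 (request code DP_AUX_NATIVE_READ == 0x9) is
 * encoded by the code below as
 *
 *     txbuf[0] = (0x9 << 4) | 0 == 0x90	(request | address bits 19:16)
 *     txbuf[1] = 0x00				(address bits 15:8)
 *     txbuf[2] = 0x00				(address bits 7:0)
 *     txbuf[3] = 16 - 1			(length minus one)
 */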
967 static ssize_t
968 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
969 {
970 	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
971 	uint8_t txbuf[20], rxbuf[20];
972 	size_t txsize, rxsize;
973 	int ret;
974 
975 	txbuf[0] = (msg->request << 4) |
976 		((msg->address >> 16) & 0xf);
977 	txbuf[1] = (msg->address >> 8) & 0xff;
978 	txbuf[2] = msg->address & 0xff;
979 	txbuf[3] = msg->size - 1;
980 
981 	switch (msg->request & ~DP_AUX_I2C_MOT) {
982 	case DP_AUX_NATIVE_WRITE:
983 	case DP_AUX_I2C_WRITE:
984 	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
985 		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
986 		rxsize = 2; /* 0 or 1 data bytes */
987 
988 		if (WARN_ON(txsize > 20))
989 			return -E2BIG;
990 
991 		if (msg->buffer)
992 			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
993 		else
994 			WARN_ON(msg->size);
995 
996 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
997 		if (ret > 0) {
998 			msg->reply = rxbuf[0] >> 4;
999 
1000 			if (ret > 1) {
1001 				/* Number of bytes written in a short write. */
1002 				ret = clamp_t(int, rxbuf[1], 0, msg->size);
1003 			} else {
1004 				/* Return payload size. */
1005 				ret = msg->size;
1006 			}
1007 		}
1008 		break;
1009 
1010 	case DP_AUX_NATIVE_READ:
1011 	case DP_AUX_I2C_READ:
1012 		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1013 		rxsize = msg->size + 1;
1014 
1015 		if (WARN_ON(rxsize > 20))
1016 			return -E2BIG;
1017 
1018 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1019 		if (ret > 0) {
1020 			msg->reply = rxbuf[0] >> 4;
1021 			/*
1022 			 * Assume happy day, and copy the data. The caller is
1023 			 * expected to check msg->reply before touching it.
1024 			 *
1025 			 * Return payload size.
1026 			 */
1027 			ret--;
1028 			memcpy(msg->buffer, rxbuf + 1, ret);
1029 		}
1030 		break;
1031 
1032 	default:
1033 		ret = -EINVAL;
1034 		break;
1035 	}
1036 
1037 	return ret;
1038 }
1039 
1040 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1041 				       enum port port)
1042 {
1043 	switch (port) {
1044 	case PORT_B:
1045 	case PORT_C:
1046 	case PORT_D:
1047 		return DP_AUX_CH_CTL(port);
1048 	default:
1049 		MISSING_CASE(port);
1050 		return DP_AUX_CH_CTL(PORT_B);
1051 	}
1052 }
1053 
1054 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1055 					enum port port, int index)
1056 {
1057 	switch (port) {
1058 	case PORT_B:
1059 	case PORT_C:
1060 	case PORT_D:
1061 		return DP_AUX_CH_DATA(port, index);
1062 	default:
1063 		MISSING_CASE(port);
1064 		return DP_AUX_CH_DATA(PORT_B, index);
1065 	}
1066 }
1067 
1068 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1069 				       enum port port)
1070 {
1071 	switch (port) {
1072 	case PORT_A:
1073 		return DP_AUX_CH_CTL(port);
1074 	case PORT_B:
1075 	case PORT_C:
1076 	case PORT_D:
1077 		return PCH_DP_AUX_CH_CTL(port);
1078 	default:
1079 		MISSING_CASE(port);
1080 		return DP_AUX_CH_CTL(PORT_A);
1081 	}
1082 }
1083 
1084 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1085 					enum port port, int index)
1086 {
1087 	switch (port) {
1088 	case PORT_A:
1089 		return DP_AUX_CH_DATA(port, index);
1090 	case PORT_B:
1091 	case PORT_C:
1092 	case PORT_D:
1093 		return PCH_DP_AUX_CH_DATA(port, index);
1094 	default:
1095 		MISSING_CASE(port);
1096 		return DP_AUX_CH_DATA(PORT_A, index);
1097 	}
1098 }
1099 
1100 /*
1101  * On SKL we don't have Aux for port E so we rely
1102  * on VBT to set a proper alternate aux channel.
1103  */
1104 static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1105 {
1106 	const struct ddi_vbt_port_info *info =
1107 		&dev_priv->vbt.ddi_port_info[PORT_E];
1108 
1109 	switch (info->alternate_aux_channel) {
1110 	case DP_AUX_A:
1111 		return PORT_A;
1112 	case DP_AUX_B:
1113 		return PORT_B;
1114 	case DP_AUX_C:
1115 		return PORT_C;
1116 	case DP_AUX_D:
1117 		return PORT_D;
1118 	default:
1119 		MISSING_CASE(info->alternate_aux_channel);
1120 		return PORT_A;
1121 	}
1122 }
1123 
1124 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1125 				       enum port port)
1126 {
1127 	if (port == PORT_E)
1128 		port = skl_porte_aux_port(dev_priv);
1129 
1130 	switch (port) {
1131 	case PORT_A:
1132 	case PORT_B:
1133 	case PORT_C:
1134 	case PORT_D:
1135 		return DP_AUX_CH_CTL(port);
1136 	default:
1137 		MISSING_CASE(port);
1138 		return DP_AUX_CH_CTL(PORT_A);
1139 	}
1140 }
1141 
1142 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1143 					enum port port, int index)
1144 {
1145 	if (port == PORT_E)
1146 		port = skl_porte_aux_port(dev_priv);
1147 
1148 	switch (port) {
1149 	case PORT_A:
1150 	case PORT_B:
1151 	case PORT_C:
1152 	case PORT_D:
1153 		return DP_AUX_CH_DATA(port, index);
1154 	default:
1155 		MISSING_CASE(port);
1156 		return DP_AUX_CH_DATA(PORT_A, index);
1157 	}
1158 }
1159 
1160 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1161 					 enum port port)
1162 {
1163 	if (INTEL_INFO(dev_priv)->gen >= 9)
1164 		return skl_aux_ctl_reg(dev_priv, port);
1165 	else if (HAS_PCH_SPLIT(dev_priv))
1166 		return ilk_aux_ctl_reg(dev_priv, port);
1167 	else
1168 		return g4x_aux_ctl_reg(dev_priv, port);
1169 }
1170 
1171 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1172 					  enum port port, int index)
1173 {
1174 	if (INTEL_INFO(dev_priv)->gen >= 9)
1175 		return skl_aux_data_reg(dev_priv, port, index);
1176 	else if (HAS_PCH_SPLIT(dev_priv))
1177 		return ilk_aux_data_reg(dev_priv, port, index);
1178 	else
1179 		return g4x_aux_data_reg(dev_priv, port, index);
1180 }
1181 
1182 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1183 {
1184 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1185 	enum port port = dp_to_dig_port(intel_dp)->port;
1186 	int i;
1187 
1188 	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1189 	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1190 		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1191 }
1192 
1193 static void
1194 intel_dp_aux_fini(struct intel_dp *intel_dp)
1195 {
1196 	drm_dp_aux_unregister(&intel_dp->aux);
1197 	kfree(intel_dp->aux.name);
1198 }
1199 
1200 static int
1201 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1202 {
1203 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1204 	enum port port = intel_dig_port->port;
1205 	int ret;
1206 
1207 	intel_aux_reg_init(intel_dp);
1208 
1209 	intel_dp->aux.name = drm_asprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1210 	if (!intel_dp->aux.name)
1211 		return -ENOMEM;
1212 
1213 	intel_dp->aux.dev = connector->base.kdev;
1214 	intel_dp->aux.transfer = intel_dp_aux_transfer;
1215 
1216 #if 0
1217 	DRM_DEBUG_KMS("registering %s bus for %s\n",
1218 		      intel_dp->aux.name,
1219 		      connector->base.kdev->kobj.name);
1220 #endif
1221 
1222 	ret = drm_dp_aux_register(&intel_dp->aux);
1223 	if (ret < 0) {
1224 		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1225 			  intel_dp->aux.name, ret);
1226 		kfree(intel_dp->aux.name);
1227 		return ret;
1228 	}
1229 
1230 	return 0;
1231 }
1232 
1233 static void
1234 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1235 {
1236 	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1237 
1238 	intel_dp_aux_fini(intel_dp);
1239 	intel_connector_unregister(intel_connector);
1240 }
1241 
1242 static void
1243 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1244 {
1245 	u32 ctrl1;
1246 
1247 	memset(&pipe_config->dpll_hw_state, 0,
1248 	       sizeof(pipe_config->dpll_hw_state));
1249 
1250 	pipe_config->ddi_pll_sel = SKL_DPLL0;
1251 	pipe_config->dpll_hw_state.cfgcr1 = 0;
1252 	pipe_config->dpll_hw_state.cfgcr2 = 0;
1253 
1254 	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1255 	switch (pipe_config->port_clock / 2) {
1256 	case 81000:
1257 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1258 					      SKL_DPLL0);
1259 		break;
1260 	case 135000:
1261 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1262 					      SKL_DPLL0);
1263 		break;
1264 	case 270000:
1265 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1266 					      SKL_DPLL0);
1267 		break;
1268 	case 162000:
1269 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1270 					      SKL_DPLL0);
1271 		break;
1272 	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
1273 	results in a CDCLK change. Need to handle the CDCLK change by
1274 	disabling pipes and re-enabling them. */
1275 	case 108000:
1276 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1277 					      SKL_DPLL0);
1278 		break;
1279 	case 216000:
1280 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1281 					      SKL_DPLL0);
1282 		break;
1283 
1284 	}
1285 	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1286 }
1287 
1288 void
1289 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1290 {
1291 	memset(&pipe_config->dpll_hw_state, 0,
1292 	       sizeof(pipe_config->dpll_hw_state));
1293 
1294 	switch (pipe_config->port_clock / 2) {
1295 	case 81000:
1296 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1297 		break;
1298 	case 135000:
1299 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1300 		break;
1301 	case 270000:
1302 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1303 		break;
1304 	}
1305 }
1306 
1307 static int
1308 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1309 {
1310 	if (intel_dp->num_sink_rates) {
1311 		*sink_rates = intel_dp->sink_rates;
1312 		return intel_dp->num_sink_rates;
1313 	}
1314 
1315 	*sink_rates = default_rates;
1316 
1317 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1318 }
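/*
 * Example of the shift trick above (illustration only): the link BW
 * codes are DP_LINK_BW_1_62 == 0x06, DP_LINK_BW_2_7 == 0x0a and
 * DP_LINK_BW_5_4 == 0x14, so (code >> 3) + 1 yields 1, 2 or 3 -- the
 * number of leading entries of default_rates[] the sink can use.
 */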
1319 
1320 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1321 {
1322 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1323 	struct drm_device *dev = dig_port->base.base.dev;
1324 
1325 	/* WaDisableHBR2:skl */
1326 	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1327 		return false;
1328 
1329 	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1330 	    (INTEL_INFO(dev)->gen >= 9))
1331 		return true;
1332 	else
1333 		return false;
1334 }
1335 
1336 static int
1337 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1338 {
1339 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1340 	struct drm_device *dev = dig_port->base.base.dev;
1341 	int size;
1342 
1343 	if (IS_BROXTON(dev)) {
1344 		*source_rates = bxt_rates;
1345 		size = ARRAY_SIZE(bxt_rates);
1346 	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1347 		*source_rates = skl_rates;
1348 		size = ARRAY_SIZE(skl_rates);
1349 	} else {
1350 		*source_rates = default_rates;
1351 		size = ARRAY_SIZE(default_rates);
1352 	}
1353 
1354 	/* This depends on the fact that 5.4 is the last value in the array */
1355 	if (!intel_dp_source_supports_hbr2(intel_dp))
1356 		size--;
1357 
1358 	return size;
1359 }
1360 
1361 static void
1362 intel_dp_set_clock(struct intel_encoder *encoder,
1363 		   struct intel_crtc_state *pipe_config)
1364 {
1365 	struct drm_device *dev = encoder->base.dev;
1366 	const struct dp_link_dpll *divisor = NULL;
1367 	int i, count = 0;
1368 
1369 	if (IS_G4X(dev)) {
1370 		divisor = gen4_dpll;
1371 		count = ARRAY_SIZE(gen4_dpll);
1372 	} else if (HAS_PCH_SPLIT(dev)) {
1373 		divisor = pch_dpll;
1374 		count = ARRAY_SIZE(pch_dpll);
1375 	} else if (IS_CHERRYVIEW(dev)) {
1376 		divisor = chv_dpll;
1377 		count = ARRAY_SIZE(chv_dpll);
1378 	} else if (IS_VALLEYVIEW(dev)) {
1379 		divisor = vlv_dpll;
1380 		count = ARRAY_SIZE(vlv_dpll);
1381 	}
1382 
1383 	if (divisor && count) {
1384 		for (i = 0; i < count; i++) {
1385 			if (pipe_config->port_clock == divisor[i].clock) {
1386 				pipe_config->dpll = divisor[i].dpll;
1387 				pipe_config->clock_set = true;
1388 				break;
1389 			}
1390 		}
1391 	}
1392 }
1393 
1394 static int intersect_rates(const int *source_rates, int source_len,
1395 			   const int *sink_rates, int sink_len,
1396 			   int *common_rates)
1397 {
1398 	int i = 0, j = 0, k = 0;
1399 
1400 	while (i < source_len && j < sink_len) {
1401 		if (source_rates[i] == sink_rates[j]) {
1402 			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1403 				return k;
1404 			common_rates[k] = source_rates[i];
1405 			++k;
1406 			++i;
1407 			++j;
1408 		} else if (source_rates[i] < sink_rates[j]) {
1409 			++i;
1410 		} else {
1411 			++j;
1412 		}
1413 	}
1414 	return k;
1415 }
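/*
 * Example (illustration only): intersecting the SKL source rates
 * { 162000, 216000, 270000, 324000, 432000, 540000 } with the default
 * sink rates { 162000, 270000, 540000 } fills common_rates with
 * { 162000, 270000, 540000 } and returns 3. Both inputs must be sorted
 * in ascending order for this merge-style walk to work.
 */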
1416 
1417 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1418 				 int *common_rates)
1419 {
1420 	const int *source_rates, *sink_rates;
1421 	int source_len, sink_len;
1422 
1423 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1424 	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1425 
1426 	return intersect_rates(source_rates, source_len,
1427 			       sink_rates, sink_len,
1428 			       common_rates);
1429 }
1430 
1431 static void snprintf_int_array(char *str, size_t len,
1432 			       const int *array, int nelem)
1433 {
1434 	int i;
1435 
1436 	str[0] = '\0';
1437 
1438 	for (i = 0; i < nelem; i++) {
1439 		int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1440 		if (r >= len)
1441 			return;
1442 		str += r;
1443 		len -= r;
1444 	}
1445 }
1446 
1447 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1448 {
1449 	const int *source_rates, *sink_rates;
1450 	int source_len, sink_len, common_len;
1451 	int common_rates[DP_MAX_SUPPORTED_RATES];
1452 	char str[128]; /* FIXME: too big for stack? */
1453 
1454 	if ((drm_debug & DRM_UT_KMS) == 0)
1455 		return;
1456 
1457 	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1458 	snprintf_int_array(str, sizeof(str), source_rates, source_len);
1459 	DRM_DEBUG_KMS("source rates: %s\n", str);
1460 
1461 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1462 	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1463 	DRM_DEBUG_KMS("sink rates: %s\n", str);
1464 
1465 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1466 	snprintf_int_array(str, sizeof(str), common_rates, common_len);
1467 	DRM_DEBUG_KMS("common rates: %s\n", str);
1468 }
1469 
1470 static int rate_to_index(int find, const int *rates)
1471 {
1472 	int i = 0;
1473 
1474 	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1475 		if (find == rates[i])
1476 			break;
1477 
1478 	return i;
1479 }
1480 
1481 int
1482 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1483 {
1484 	int rates[DP_MAX_SUPPORTED_RATES] = {};
1485 	int len;
1486 
1487 	len = intel_dp_common_rates(intel_dp, rates);
1488 	if (WARN_ON(len <= 0))
1489 		return 162000;
1490 
1491 	return rates[rate_to_index(0, rates) - 1];
1492 }
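/*
 * Note on the indexing trick above (illustration only): rates[] is
 * zero-initialized, so rate_to_index(0, rates) returns the number of
 * populated entries. With common rates { 162000, 270000, 540000 } it
 * returns 3, and rates[3 - 1] == 540000 is the highest common rate.
 */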
1493 
1494 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1495 {
1496 	return rate_to_index(rate, intel_dp->sink_rates);
1497 }
1498 
1499 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1500 			   uint8_t *link_bw, uint8_t *rate_select)
1501 {
1502 	if (intel_dp->num_sink_rates) {
1503 		*link_bw = 0;
1504 		*rate_select =
1505 			intel_dp_rate_select(intel_dp, port_clock);
1506 	} else {
1507 		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1508 		*rate_select = 0;
1509 	}
1510 }
1511 
1512 bool
1513 intel_dp_compute_config(struct intel_encoder *encoder,
1514 			struct intel_crtc_state *pipe_config)
1515 {
1516 	struct drm_device *dev = encoder->base.dev;
1517 	struct drm_i915_private *dev_priv = dev->dev_private;
1518 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1519 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1520 	enum port port = dp_to_dig_port(intel_dp)->port;
1521 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1522 	struct intel_connector *intel_connector = intel_dp->attached_connector;
1523 	int lane_count, clock;
1524 	int min_lane_count = 1;
1525 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1526 	/* Conveniently, the link BW constants become indices with a shift...*/
1527 	int min_clock = 0;
1528 	int max_clock;
1529 	int bpp, mode_rate;
1530 	int link_avail, link_clock;
1531 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1532 	int common_len;
1533 	uint8_t link_bw, rate_select;
1534 
1535 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1536 
1537 	/* No common link rates between source and sink */
1538 	WARN_ON(common_len <= 0);
1539 
1540 	max_clock = common_len - 1;
1541 
1542 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1543 		pipe_config->has_pch_encoder = true;
1544 
1545 	pipe_config->has_dp_encoder = true;
1546 	pipe_config->has_drrs = false;
1547 	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1548 
1549 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1550 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1551 				       adjusted_mode);
1552 
1553 		if (INTEL_INFO(dev)->gen >= 9) {
1554 			int ret;
1555 			ret = skl_update_scaler_crtc(pipe_config);
1556 			if (ret)
1557 				return false;
1558 		}
1559 
1560 		if (HAS_GMCH_DISPLAY(dev))
1561 			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1562 						 intel_connector->panel.fitting_mode);
1563 		else
1564 			intel_pch_panel_fitting(intel_crtc, pipe_config,
1565 						intel_connector->panel.fitting_mode);
1566 	}
1567 
1568 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1569 		return false;
1570 
1571 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1572 		      "max bw %d pixel clock %iKHz\n",
1573 		      max_lane_count, common_rates[max_clock],
1574 		      adjusted_mode->crtc_clock);
1575 
1576 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1577 	 * bpc in between. */
1578 	bpp = pipe_config->pipe_bpp;
1579 	if (is_edp(intel_dp)) {
1580 
1581 		/* Get bpp from vbt only for panels that don't have bpp in edid */
1582 		if (intel_connector->base.display_info.bpc == 0 &&
1583 			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1584 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1585 				      dev_priv->vbt.edp_bpp);
1586 			bpp = dev_priv->vbt.edp_bpp;
1587 		}
1588 
1589 		/*
1590 		 * Use the maximum clock and number of lanes the eDP panel
1591 		 * advertises being capable of. The panels are generally
1592 		 * designed to support only a single clock and lane
1593 		 * configuration, and typically these values correspond to the
1594 		 * native resolution of the panel.
1595 		 */
1596 		min_lane_count = max_lane_count;
1597 		min_clock = max_clock;
1598 	}
1599 
1600 	for (; bpp >= 6*3; bpp -= 2*3) {
1601 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1602 						   bpp);
1603 
1604 		for (clock = min_clock; clock <= max_clock; clock++) {
1605 			for (lane_count = min_lane_count;
1606 				lane_count <= max_lane_count;
1607 				lane_count <<= 1) {
1608 
1609 				link_clock = common_rates[clock];
1610 				link_avail = intel_dp_max_data_rate(link_clock,
1611 								    lane_count);
1612 
1613 				if (mode_rate <= link_avail) {
1614 					goto found;
1615 				}
1616 			}
1617 		}
1618 	}
1619 
1620 	return false;
1621 
1622 found:
1623 	if (intel_dp->color_range_auto) {
1624 		/*
1625 		 * See:
1626 		 * CEA-861-E - 5.1 Default Encoding Parameters
1627 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1628 		 */
1629 		pipe_config->limited_color_range =
1630 			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1631 	} else {
1632 		pipe_config->limited_color_range =
1633 			intel_dp->limited_color_range;
1634 	}
1635 
1636 	pipe_config->lane_count = lane_count;
1637 
1638 	pipe_config->pipe_bpp = bpp;
1639 	pipe_config->port_clock = common_rates[clock];
1640 
1641 	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1642 			      &link_bw, &rate_select);
1643 
1644 	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1645 		      link_bw, rate_select, pipe_config->lane_count,
1646 		      pipe_config->port_clock, bpp);
1647 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1648 		      mode_rate, link_avail);
1649 
1650 	intel_link_compute_m_n(bpp, lane_count,
1651 			       adjusted_mode->crtc_clock,
1652 			       pipe_config->port_clock,
1653 			       &pipe_config->dp_m_n);
1654 
1655 	if (intel_connector->panel.downclock_mode != NULL &&
1656 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1657 			pipe_config->has_drrs = true;
1658 			intel_link_compute_m_n(bpp, lane_count,
1659 				intel_connector->panel.downclock_mode->clock,
1660 				pipe_config->port_clock,
1661 				&pipe_config->dp_m2_n2);
1662 	}
1663 
1664 	if ((IS_SKYLAKE(dev)  || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1665 		skl_edp_set_pll_config(pipe_config);
1666 	else if (IS_BROXTON(dev))
1667 		/* handled in ddi */;
1668 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1669 		hsw_dp_set_ddi_pll_sel(pipe_config);
1670 	else
1671 		intel_dp_set_clock(encoder, pipe_config);
1672 
1673 	return true;
1674 }
1675 
1676 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1677 			      const struct intel_crtc_state *pipe_config)
1678 {
1679 	intel_dp->link_rate = pipe_config->port_clock;
1680 	intel_dp->lane_count = pipe_config->lane_count;
1681 }
1682 
1683 static void intel_dp_prepare(struct intel_encoder *encoder)
1684 {
1685 	struct drm_device *dev = encoder->base.dev;
1686 	struct drm_i915_private *dev_priv = dev->dev_private;
1687 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1688 	enum port port = dp_to_dig_port(intel_dp)->port;
1689 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1690 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1691 
1692 	intel_dp_set_link_params(intel_dp, crtc->config);
1693 
1694 	/*
1695 	 * There are four kinds of DP registers:
1696 	 *
1697 	 * 	IBX PCH
1698 	 * 	SNB CPU
1699 	 *	IVB CPU
1700 	 * 	CPT PCH
1701 	 *
1702 	 * IBX PCH and CPU are the same for almost everything,
1703 	 * except that the CPU DP PLL is configured in this
1704 	 * register
1705 	 *
1706 	 * CPT PCH is quite different, having many bits moved
1707 	 * to the TRANS_DP_CTL register instead. That
1708 	 * configuration happens (oddly) in ironlake_pch_enable
1709 	 */
1710 
1711 	/* Preserve the BIOS-computed detected bit. This is
1712 	 * supposed to be read-only.
1713 	 */
1714 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1715 
1716 	/* Handle DP bits in common between all three register formats */
1717 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1718 	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1719 
1720 	/* Split out the IBX/CPU vs CPT settings */
1721 
1722 	if (IS_GEN7(dev) && port == PORT_A) {
1723 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1724 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1725 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1726 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1727 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1728 
1729 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1730 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1731 
1732 		intel_dp->DP |= crtc->pipe << 29;
1733 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1734 		u32 trans_dp;
1735 
1736 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1737 
1738 		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1739 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1740 			trans_dp |= TRANS_DP_ENH_FRAMING;
1741 		else
1742 			trans_dp &= ~TRANS_DP_ENH_FRAMING;
1743 		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1744 	} else {
1745 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1746 		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
1747 			intel_dp->DP |= DP_COLOR_RANGE_16_235;
1748 
1749 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1750 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1751 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1752 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1753 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
1754 
1755 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1756 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1757 
1758 		if (IS_CHERRYVIEW(dev))
1759 			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1760 		else if (crtc->pipe == PIPE_B)
1761 			intel_dp->DP |= DP_PIPEB_SELECT;
1762 	}
1763 }
1764 
1765 #define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1766 #define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1767 
1768 #define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1769 #define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1770 
1771 #define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1772 #define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1773 
1774 static void wait_panel_status(struct intel_dp *intel_dp,
1775 				       u32 mask,
1776 				       u32 value)
1777 {
1778 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1779 	struct drm_i915_private *dev_priv = dev->dev_private;
1780 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1781 
1782 	lockdep_assert_held(&dev_priv->pps_mutex);
1783 
1784 	pp_stat_reg = _pp_stat_reg(intel_dp);
1785 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1786 
1787 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1788 			mask, value,
1789 			I915_READ(pp_stat_reg),
1790 			I915_READ(pp_ctrl_reg));
1791 
1792 	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1793 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1794 				I915_READ(pp_stat_reg),
1795 				I915_READ(pp_ctrl_reg));
1796 	}
1797 
1798 	DRM_DEBUG_KMS("Wait complete\n");
1799 }
1800 
1801 static void wait_panel_on(struct intel_dp *intel_dp)
1802 {
1803 	DRM_DEBUG_KMS("Wait for panel power on\n");
1804 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1805 }
1806 
1807 static void wait_panel_off(struct intel_dp *intel_dp)
1808 {
1809 	DRM_DEBUG_KMS("Wait for panel power off time\n");
1810 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1811 }
1812 
1813 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1814 {
1815 	ktime_t panel_power_on_time;
1816 	s64 panel_power_off_duration;
1817 
1818 	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1819 
1820 	/* take the difference of current time and panel power off time
1821 	 * and then make panel wait for t11_t12 if needed. */
1822 	panel_power_on_time = ktime_get_boottime();
1823 	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
1824 
1825 	/* When we disable the VDD override bit last we have to do the manual
1826 	 * wait. */
1827 	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
1828 		wait_remaining_ms_from_jiffies(jiffies,
1829 				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);
1830 
1831 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1832 }
1833 
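/*
 * Worked example for the remainder wait above (illustrative numbers
 * only): with panel_power_cycle_delay == 500 and a panel switched off
 * 200 ms ago, panel_power_off_duration == 200, so we sleep for the
 * remaining 300 ms before polling the sequencer with
 * IDLE_CYCLE_MASK/IDLE_CYCLE_VALUE.
 */
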
1834 static void wait_backlight_on(struct intel_dp *intel_dp)
1835 {
1836 	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1837 				       intel_dp->backlight_on_delay);
1838 }
1839 
1840 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1841 {
1842 	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1843 				       intel_dp->backlight_off_delay);
1844 }
1845 
1846 /* Read the current pp_control value, unlocking the register if it
1847  * is locked
1848  */
1849 
1850 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1851 {
1852 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1853 	struct drm_i915_private *dev_priv = dev->dev_private;
1854 	u32 control;
1855 
1856 	lockdep_assert_held(&dev_priv->pps_mutex);
1857 
1858 	control = I915_READ(_pp_ctrl_reg(intel_dp));
1859 	if (!IS_BROXTON(dev)) {
1860 		control &= ~PANEL_UNLOCK_MASK;
1861 		control |= PANEL_UNLOCK_REGS;
1862 	}
1863 	return control;
1864 }
1865 
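/*
 * The helper above is the basis of the read-modify-write pattern used
 * throughout the PPS code (a sketch; the real call sites follow below):
 *
 *	pp = ironlake_get_pp_control(intel_dp);
 *	pp |= EDP_FORCE_VDD;
 *	I915_WRITE(_pp_ctrl_reg(intel_dp), pp);
 *	POSTING_READ(_pp_ctrl_reg(intel_dp));
 */
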
1866 /*
1867  * Must be paired with edp_panel_vdd_off().
1868  * Must hold pps_mutex around the whole on/off sequence.
1869  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1870  */
1871 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1872 {
1873 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1874 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1875 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1876 	struct drm_i915_private *dev_priv = dev->dev_private;
1877 	enum intel_display_power_domain power_domain;
1878 	u32 pp;
1879 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1880 	bool need_to_disable = !intel_dp->want_panel_vdd;
1881 
1882 	lockdep_assert_held(&dev_priv->pps_mutex);
1883 
1884 	if (!is_edp(intel_dp))
1885 		return false;
1886 
1887 	cancel_delayed_work(&intel_dp->panel_vdd_work);
1888 	intel_dp->want_panel_vdd = true;
1889 
1890 	if (edp_have_panel_vdd(intel_dp))
1891 		return need_to_disable;
1892 
1893 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1894 	intel_display_power_get(dev_priv, power_domain);
1895 
1896 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1897 		      port_name(intel_dig_port->port));
1898 
1899 	if (!edp_have_panel_power(intel_dp))
1900 		wait_panel_power_cycle(intel_dp);
1901 
1902 	pp = ironlake_get_pp_control(intel_dp);
1903 	pp |= EDP_FORCE_VDD;
1904 
1905 	pp_stat_reg = _pp_stat_reg(intel_dp);
1906 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1907 
1908 	I915_WRITE(pp_ctrl_reg, pp);
1909 	POSTING_READ(pp_ctrl_reg);
1910 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1911 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1912 	/*
1913 	 * If the panel wasn't on, delay before accessing aux channel
1914 	 */
1915 	if (!edp_have_panel_power(intel_dp)) {
1916 		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1917 			      port_name(intel_dig_port->port));
1918 		msleep(intel_dp->panel_power_up_delay);
1919 	}
1920 
1921 	return need_to_disable;
1922 }
1923 
1924 /*
1925  * Must be paired with intel_edp_panel_vdd_off() or
1926  * intel_edp_panel_off().
1927  * Nested calls to these functions are not allowed since
1928  * we drop the lock. Caller must use some higher level
1929  * locking to prevent nested calls from other threads.
1930  */
1931 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1932 {
1933 	bool vdd;
1934 
1935 	if (!is_edp(intel_dp))
1936 		return;
1937 
1938 	pps_lock(intel_dp);
1939 	vdd = edp_panel_vdd_on(intel_dp);
1940 	pps_unlock(intel_dp);
1941 
1942 #ifdef __DragonFly__
1943 	/* XXX: limit dmesg spam to 16 warnings instead of 137, where is the bug? */
1944 	if (!vdd)
1945 		DRM_ERROR_RATELIMITED("eDP port %c VDD already requested on\n",
1946 		    port_name(dp_to_dig_port(intel_dp)->port));
1947 #else
1948 	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1949 	     port_name(dp_to_dig_port(intel_dp)->port));
1950 #endif
1951 }
1952 
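/*
 * Illustrative pairing (a sketch, not a call site in this file): a
 * caller that needs AUX access while the panel may be powered down
 * brackets the transfer with the VDD helpers, e.g.
 *
 *	intel_edp_panel_vdd_on(intel_dp);
 *	... AUX transfers via intel_dp->aux ...
 *	intel_edp_panel_vdd_off(intel_dp, false);  /* (dp, sync) signature assumed */
 *
 * Per the comment above, these outer helpers take pps_mutex
 * themselves and must not be nested.
 */
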
1953 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1954 {
1955 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1956 	struct drm_i915_private *dev_priv = dev->dev_private;
1957 	struct intel_digital_port *intel_dig_port =
1958 		dp_to_dig_port(intel_dp);
1959 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1960 	enum intel_display_power_domain power_domain;
1961 	u32 pp;
1962 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1963 
1964 	lockdep_assert_held(&dev_priv->pps_mutex);
1965 
1966 	WARN_ON(intel_dp->want_panel_vdd);
1967 
1968 	if (!edp_have_panel_vdd(intel_dp))
1969 		return;
1970 
1971 	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1972 		      port_name(intel_dig_port->port));
1973 
1974 	pp = ironlake_get_pp_control(intel_dp);
1975 	pp &= ~EDP_FORCE_VDD;
1976 
1977 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1978 	pp_stat_reg = _pp_stat_reg(intel_dp);
1979 
1980 	I915_WRITE(pp_ctrl_reg, pp);
1981 	POSTING_READ(pp_ctrl_reg);
1982 
1983 	/* Make sure sequencer is idle before allowing subsequent activity */
1984 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1985 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1986 
1987 	if ((pp & POWER_TARGET_ON) == 0)
1988 		intel_dp->panel_power_off_time = ktime_get_boottime();
1989 
1990 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1991 	intel_display_power_put(dev_priv, power_domain);
1992 }
1993 
1994 static void edp_panel_vdd_work(struct work_struct *__work)
1995 {
1996 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1997 						 struct intel_dp, panel_vdd_work);
1998 
1999 	pps_lock(intel_dp);
2000 	if (!intel_dp->want_panel_vdd)
2001 		edp_panel_vdd_off_sync(intel_dp);
2002 	pps_unlock(intel_dp);
2003 }
2004 
2005 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2006 {
2007 	unsigned long delay;
2008 
2009 	/*
2010 	 * Queue the timer to fire a long time from now (relative to the power
2011 	 * down delay) to keep the panel power up across a sequence of
2012 	 * operations.
2013 	 */
2014 	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2015 	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2016 }
2017 
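/*
 * Illustrative numbers for the delay above: with a 500 ms
 * panel_power_cycle_delay, the delayed work fires roughly 2.5 s after
 * the last edp_panel_vdd_off(..., false), so back-to-back AUX
 * sequences reuse the already-enabled VDD instead of power cycling.
 */
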
2018 /*
2019  * Must be paired with edp_panel_vdd_on().
2020  * Must hold pps_mutex around the whole on/off sequence.
2021  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2022  */
2023 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2024 {
2025 	struct drm_i915_private *dev_priv =
2026 		intel_dp_to_dev(intel_dp)->dev_private;
2027 
2028 	lockdep_assert_held(&dev_priv->pps_mutex);
2029 
2030 	if (!is_edp(intel_dp))
2031 		return;
2032 
2033 	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2034 	     port_name(dp_to_dig_port(intel_dp)->port));
2035 
2036 	intel_dp->want_panel_vdd = false;
2037 
2038 	if (sync)
2039 		edp_panel_vdd_off_sync(intel_dp);
2040 	else
2041 		edp_panel_vdd_schedule_off(intel_dp);
2042 }
2043 
2044 static void edp_panel_on(struct intel_dp *intel_dp)
2045 {
2046 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2047 	struct drm_i915_private *dev_priv = dev->dev_private;
2048 	u32 pp;
2049 	i915_reg_t pp_ctrl_reg;
2050 
2051 	lockdep_assert_held(&dev_priv->pps_mutex);
2052 
2053 	if (!is_edp(intel_dp))
2054 		return;
2055 
2056 	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2057 		      port_name(dp_to_dig_port(intel_dp)->port));
2058 
2059 	if (WARN(edp_have_panel_power(intel_dp),
2060 		 "eDP port %c panel power already on\n",
2061 		 port_name(dp_to_dig_port(intel_dp)->port)))
2062 		return;
2063 
2064 	wait_panel_power_cycle(intel_dp);
2065 
2066 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2067 	pp = ironlake_get_pp_control(intel_dp);
2068 	if (IS_GEN5(dev)) {
2069 		/* ILK workaround: disable reset around power sequence */
2070 		pp &= ~PANEL_POWER_RESET;
2071 		I915_WRITE(pp_ctrl_reg, pp);
2072 		POSTING_READ(pp_ctrl_reg);
2073 	}
2074 
2075 	pp |= POWER_TARGET_ON;
2076 	if (!IS_GEN5(dev))
2077 		pp |= PANEL_POWER_RESET;
2078 
2079 	I915_WRITE(pp_ctrl_reg, pp);
2080 	POSTING_READ(pp_ctrl_reg);
2081 
2082 	wait_panel_on(intel_dp);
2083 	intel_dp->last_power_on = jiffies;
2084 
2085 	if (IS_GEN5(dev)) {
2086 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2087 		I915_WRITE(pp_ctrl_reg, pp);
2088 		POSTING_READ(pp_ctrl_reg);
2089 	}
2090 }
2091 
2092 void intel_edp_panel_on(struct intel_dp *intel_dp)
2093 {
2094 	if (!is_edp(intel_dp))
2095 		return;
2096 
2097 	pps_lock(intel_dp);
2098 	edp_panel_on(intel_dp);
2099 	pps_unlock(intel_dp);
2100 }
2101 
2102 
2103 static void edp_panel_off(struct intel_dp *intel_dp)
2104 {
2105 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2106 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
2107 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2108 	struct drm_i915_private *dev_priv = dev->dev_private;
2109 	enum intel_display_power_domain power_domain;
2110 	u32 pp;
2111 	i915_reg_t pp_ctrl_reg;
2112 
2113 	lockdep_assert_held(&dev_priv->pps_mutex);
2114 
2115 	if (!is_edp(intel_dp))
2116 		return;
2117 
2118 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2119 		      port_name(dp_to_dig_port(intel_dp)->port));
2120 
2121 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2122 	     port_name(dp_to_dig_port(intel_dp)->port));
2123 
2124 	pp = ironlake_get_pp_control(intel_dp);
2125 	/* We need to switch off panel power _and_ force vdd, for otherwise some
2126 	 * panels get very unhappy and cease to work. */
2127 	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2128 		EDP_BLC_ENABLE);
2129 
2130 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2131 
2132 	intel_dp->want_panel_vdd = false;
2133 
2134 	I915_WRITE(pp_ctrl_reg, pp);
2135 	POSTING_READ(pp_ctrl_reg);
2136 
2137 	intel_dp->panel_power_off_time = ktime_get_boottime();
2138 	wait_panel_off(intel_dp);
2139 
2140 	/* We got a reference when we enabled the VDD. */
2141 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
2142 	intel_display_power_put(dev_priv, power_domain);
2143 }
2144 
2145 void intel_edp_panel_off(struct intel_dp *intel_dp)
2146 {
2147 	if (!is_edp(intel_dp))
2148 		return;
2149 
2150 	pps_lock(intel_dp);
2151 	edp_panel_off(intel_dp);
2152 	pps_unlock(intel_dp);
2153 }
2154 
2155 /* Enable backlight in the panel power control. */
2156 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2157 {
2158 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2159 	struct drm_device *dev = intel_dig_port->base.base.dev;
2160 	struct drm_i915_private *dev_priv = dev->dev_private;
2161 	u32 pp;
2162 	i915_reg_t pp_ctrl_reg;
2163 
2164 	/*
2165 	 * If we enable the backlight right away following a panel power
2166 	 * on, we may see slight flicker as the panel syncs with the eDP
2167 	 * link.  So delay a bit to make sure the image is solid before
2168 	 * allowing it to appear.
2169 	 */
2170 	wait_backlight_on(intel_dp);
2171 
2172 	pps_lock(intel_dp);
2173 
2174 	pp = ironlake_get_pp_control(intel_dp);
2175 	pp |= EDP_BLC_ENABLE;
2176 
2177 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2178 
2179 	I915_WRITE(pp_ctrl_reg, pp);
2180 	POSTING_READ(pp_ctrl_reg);
2181 
2182 	pps_unlock(intel_dp);
2183 }
2184 
2185 /* Enable backlight PWM and backlight PP control. */
2186 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2187 {
2188 	if (!is_edp(intel_dp))
2189 		return;
2190 
2191 	DRM_DEBUG_KMS("\n");
2192 
2193 	intel_panel_enable_backlight(intel_dp->attached_connector);
2194 	_intel_edp_backlight_on(intel_dp);
2195 }
2196 
2197 /* Disable backlight in the panel power control. */
2198 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2199 {
2200 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2201 	struct drm_i915_private *dev_priv = dev->dev_private;
2202 	u32 pp;
2203 	i915_reg_t pp_ctrl_reg;
2204 
2205 	if (!is_edp(intel_dp))
2206 		return;
2207 
2208 	pps_lock(intel_dp);
2209 
2210 	pp = ironlake_get_pp_control(intel_dp);
2211 	pp &= ~EDP_BLC_ENABLE;
2212 
2213 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2214 
2215 	I915_WRITE(pp_ctrl_reg, pp);
2216 	POSTING_READ(pp_ctrl_reg);
2217 
2218 	pps_unlock(intel_dp);
2219 
2220 	intel_dp->last_backlight_off = jiffies;
2221 	edp_wait_backlight_off(intel_dp);
2222 }
2223 
2224 /* Disable backlight PP control and backlight PWM. */
2225 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2226 {
2227 	if (!is_edp(intel_dp))
2228 		return;
2229 
2230 	DRM_DEBUG_KMS("\n");
2231 
2232 	_intel_edp_backlight_off(intel_dp);
2233 	intel_panel_disable_backlight(intel_dp->attached_connector);
2234 }
2235 
2236 /*
2237  * Hook for controlling the panel power control backlight through the bl_power
2238  * sysfs attribute. Take care to handle multiple calls.
2239  */
2240 static void intel_edp_backlight_power(struct intel_connector *connector,
2241 				      bool enable)
2242 {
2243 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2244 	bool is_enabled;
2245 
2246 	pps_lock(intel_dp);
2247 	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2248 	pps_unlock(intel_dp);
2249 
2250 	if (is_enabled == enable)
2251 		return;
2252 
2253 	DRM_DEBUG_KMS("panel power control backlight %s\n",
2254 		      enable ? "enable" : "disable");
2255 
2256 	if (enable)
2257 		_intel_edp_backlight_on(intel_dp);
2258 	else
2259 		_intel_edp_backlight_off(intel_dp);
2260 }
2261 
2262 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2263 {
2264 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2265 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2266 	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2267 
2268 	I915_STATE_WARN(cur_state != state,
2269 			"DP port %c state assertion failure (expected %s, current %s)\n",
2270 			port_name(dig_port->port),
2271 			onoff(state), onoff(cur_state));
2272 }
2273 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2274 
2275 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2276 {
2277 	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2278 
2279 	I915_STATE_WARN(cur_state != state,
2280 			"eDP PLL state assertion failure (expected %s, current %s)\n",
2281 			onoff(state), onoff(cur_state));
2282 }
2283 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2284 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2285 
2286 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2287 {
2288 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2289 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2290 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2291 
2292 	assert_pipe_disabled(dev_priv, crtc->pipe);
2293 	assert_dp_port_disabled(intel_dp);
2294 	assert_edp_pll_disabled(dev_priv);
2295 
2296 	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2297 		      crtc->config->port_clock);
2298 
2299 	intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2300 
2301 	if (crtc->config->port_clock == 162000)
2302 		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2303 	else
2304 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2305 
2306 	I915_WRITE(DP_A, intel_dp->DP);
2307 	POSTING_READ(DP_A);
2308 	udelay(500);
2309 
2310 	intel_dp->DP |= DP_PLL_ENABLE;
2311 
2312 	I915_WRITE(DP_A, intel_dp->DP);
2313 	POSTING_READ(DP_A);
2314 	udelay(200);
2315 }
2316 
2317 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2318 {
2319 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2320 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2321 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2322 
2323 	assert_pipe_disabled(dev_priv, crtc->pipe);
2324 	assert_dp_port_disabled(intel_dp);
2325 	assert_edp_pll_enabled(dev_priv);
2326 
2327 	DRM_DEBUG_KMS("disabling eDP PLL\n");
2328 
2329 	intel_dp->DP &= ~DP_PLL_ENABLE;
2330 
2331 	I915_WRITE(DP_A, intel_dp->DP);
2332 	POSTING_READ(DP_A);
2333 	udelay(200);
2334 }
2335 
2336 /* If the sink supports it, try to set the power state appropriately */
2337 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2338 {
2339 	int ret, i;
2340 
2341 	/* Should have a valid DPCD by this point */
2342 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2343 		return;
2344 
2345 	if (mode != DRM_MODE_DPMS_ON) {
2346 		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2347 					 DP_SET_POWER_D3);
2348 	} else {
2349 		/*
2350 		 * When turning on, retry the write a few times, sleeping
2351 		 * 1 ms between attempts, to give the sink time to wake up.
2352 		 */
2353 		for (i = 0; i < 3; i++) {
2354 			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2355 						 DP_SET_POWER_D0);
2356 			if (ret == 1)
2357 				break;
2358 			msleep(1);
2359 		}
2360 	}
2361 
2362 	if (ret != 1)
2363 		DRM_DEBUG_KMS("failed to %s sink power state\n",
2364 			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2365 }
2366 
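/*
 * Sketch of the call pattern used elsewhere in this file: the sink is
 * woken before link training and put to sleep on disable, e.g.
 *
 *	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 *	intel_dp_start_link_train(intel_dp);
 *	...
 *	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 */
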
2367 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2368 				  enum i915_pipe *pipe)
2369 {
2370 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2371 	enum port port = dp_to_dig_port(intel_dp)->port;
2372 	struct drm_device *dev = encoder->base.dev;
2373 	struct drm_i915_private *dev_priv = dev->dev_private;
2374 	enum intel_display_power_domain power_domain;
2375 	u32 tmp;
2376 	bool ret;
2377 
2378 	power_domain = intel_display_port_power_domain(encoder);
2379 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2380 		return false;
2381 
2382 	ret = false;
2383 
2384 	tmp = I915_READ(intel_dp->output_reg);
2385 
2386 	if (!(tmp & DP_PORT_EN))
2387 		goto out;
2388 
2389 	if (IS_GEN7(dev) && port == PORT_A) {
2390 		*pipe = PORT_TO_PIPE_CPT(tmp);
2391 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2392 		enum i915_pipe p;
2393 
2394 		for_each_pipe(dev_priv, p) {
2395 			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2396 			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2397 				*pipe = p;
2398 				ret = true;
2399 
2400 				goto out;
2401 			}
2402 		}
2403 
2404 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2405 			      i915_mmio_reg_offset(intel_dp->output_reg));
2406 	} else if (IS_CHERRYVIEW(dev)) {
2407 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2408 	} else {
2409 		*pipe = PORT_TO_PIPE(tmp);
2410 	}
2411 
2412 	ret = true;
2413 
2414 out:
2415 	intel_display_power_put(dev_priv, power_domain);
2416 
2417 	return ret;
2418 }
2419 
2420 static void intel_dp_get_config(struct intel_encoder *encoder,
2421 				struct intel_crtc_state *pipe_config)
2422 {
2423 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2424 	u32 tmp, flags = 0;
2425 	struct drm_device *dev = encoder->base.dev;
2426 	struct drm_i915_private *dev_priv = dev->dev_private;
2427 	enum port port = dp_to_dig_port(intel_dp)->port;
2428 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2429 	int dotclock;
2430 
2431 	tmp = I915_READ(intel_dp->output_reg);
2432 
2433 	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2434 
2435 	if (HAS_PCH_CPT(dev) && port != PORT_A) {
2436 		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2437 
2438 		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2439 			flags |= DRM_MODE_FLAG_PHSYNC;
2440 		else
2441 			flags |= DRM_MODE_FLAG_NHSYNC;
2442 
2443 		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2444 			flags |= DRM_MODE_FLAG_PVSYNC;
2445 		else
2446 			flags |= DRM_MODE_FLAG_NVSYNC;
2447 	} else {
2448 		if (tmp & DP_SYNC_HS_HIGH)
2449 			flags |= DRM_MODE_FLAG_PHSYNC;
2450 		else
2451 			flags |= DRM_MODE_FLAG_NHSYNC;
2452 
2453 		if (tmp & DP_SYNC_VS_HIGH)
2454 			flags |= DRM_MODE_FLAG_PVSYNC;
2455 		else
2456 			flags |= DRM_MODE_FLAG_NVSYNC;
2457 	}
2458 
2459 	pipe_config->base.adjusted_mode.flags |= flags;
2460 
2461 	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2462 	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
2463 		pipe_config->limited_color_range = true;
2464 
2465 	pipe_config->has_dp_encoder = true;
2466 
2467 	pipe_config->lane_count =
2468 		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2469 
2470 	intel_dp_get_m_n(crtc, pipe_config);
2471 
2472 	if (port == PORT_A) {
2473 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2474 			pipe_config->port_clock = 162000;
2475 		else
2476 			pipe_config->port_clock = 270000;
2477 	}
2478 
2479 	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2480 					    &pipe_config->dp_m_n);
2481 
2482 	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2483 		ironlake_check_encoder_dotclock(pipe_config, dotclock);
2484 
2485 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2486 
2487 	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2488 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2489 		/*
2490 		 * This is a big fat ugly hack.
2491 		 *
2492 		 * Some machines in UEFI boot mode provide us a VBT that has 18
2493 		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2494 		 * unknown we fail to light up. Yet the same BIOS boots up with
2495 		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2496 		 * max, not what it tells us to use.
2497 		 *
2498 		 * Note: This will still be broken if the eDP panel is not lit
2499 		 * up by the BIOS, and thus we can't get the mode at module
2500 		 * load.
2501 		 */
2502 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2503 			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2504 		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2505 	}
2506 }
2507 
2508 static void intel_disable_dp(struct intel_encoder *encoder)
2509 {
2510 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2511 	struct drm_device *dev = encoder->base.dev;
2512 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2513 
2514 	if (crtc->config->has_audio)
2515 		intel_audio_codec_disable(encoder);
2516 
2517 	if (HAS_PSR(dev) && !HAS_DDI(dev))
2518 		intel_psr_disable(intel_dp);
2519 
2520 	/* Make sure the panel is off before trying to change the mode. But also
2521 	 * ensure that we have vdd while we switch off the panel. */
2522 	intel_edp_panel_vdd_on(intel_dp);
2523 	intel_edp_backlight_off(intel_dp);
2524 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2525 	intel_edp_panel_off(intel_dp);
2526 
2527 	/* disable the port before the pipe on g4x */
2528 	if (INTEL_INFO(dev)->gen < 5)
2529 		intel_dp_link_down(intel_dp);
2530 }
2531 
2532 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2533 {
2534 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2535 	enum port port = dp_to_dig_port(intel_dp)->port;
2536 
2537 	intel_dp_link_down(intel_dp);
2538 
2539 	/* Only ilk+ has port A */
2540 	if (port == PORT_A)
2541 		ironlake_edp_pll_off(intel_dp);
2542 }
2543 
2544 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2545 {
2546 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2547 
2548 	intel_dp_link_down(intel_dp);
2549 }
2550 
2551 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2552 				     bool reset)
2553 {
2554 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2555 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2556 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2557 	enum i915_pipe pipe = crtc->pipe;
2558 	uint32_t val;
2559 
2560 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2561 	if (reset)
2562 		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2563 	else
2564 		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2565 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2566 
2567 	if (crtc->config->lane_count > 2) {
2568 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2569 		if (reset)
2570 			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2571 		else
2572 			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2573 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2574 	}
2575 
2576 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2577 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2578 	if (reset)
2579 		val &= ~DPIO_PCS_CLK_SOFT_RESET;
2580 	else
2581 		val |= DPIO_PCS_CLK_SOFT_RESET;
2582 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2583 
2584 	if (crtc->config->lane_count > 2) {
2585 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2586 		val |= CHV_PCS_REQ_SOFTRESET_EN;
2587 		if (reset)
2588 			val &= ~DPIO_PCS_CLK_SOFT_RESET;
2589 		else
2590 			val |= DPIO_PCS_CLK_SOFT_RESET;
2591 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2592 	}
2593 }
2594 
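/*
 * Condensed usage sketch (the real callers below split this across
 * the pre-PLL-enable and pre-enable hooks): PHY lane reprogramming is
 * bracketed by assert/deassert of the data lane soft reset, under
 * sb_lock:
 *
 *	mutex_lock(&dev_priv->sb_lock);
 *	chv_data_lane_soft_reset(encoder, true);	/* assert */
 *	... reprogram DPIO lanes ...
 *	chv_data_lane_soft_reset(encoder, false);	/* deassert */
 *	mutex_unlock(&dev_priv->sb_lock);
 */
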
2595 static void chv_post_disable_dp(struct intel_encoder *encoder)
2596 {
2597 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2598 	struct drm_device *dev = encoder->base.dev;
2599 	struct drm_i915_private *dev_priv = dev->dev_private;
2600 
2601 	intel_dp_link_down(intel_dp);
2602 
2603 	mutex_lock(&dev_priv->sb_lock);
2604 
2605 	/* Assert data lane reset */
2606 	chv_data_lane_soft_reset(encoder, true);
2607 
2608 	mutex_unlock(&dev_priv->sb_lock);
2609 }
2610 
2611 static void
2612 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2613 			 uint32_t *DP,
2614 			 uint8_t dp_train_pat)
2615 {
2616 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2617 	struct drm_device *dev = intel_dig_port->base.base.dev;
2618 	struct drm_i915_private *dev_priv = dev->dev_private;
2619 	enum port port = intel_dig_port->port;
2620 
2621 	if (HAS_DDI(dev)) {
2622 		uint32_t temp = I915_READ(DP_TP_CTL(port));
2623 
2624 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2625 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2626 		else
2627 			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2628 
2629 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2630 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2631 		case DP_TRAINING_PATTERN_DISABLE:
2632 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2633 
2634 			break;
2635 		case DP_TRAINING_PATTERN_1:
2636 			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2637 			break;
2638 		case DP_TRAINING_PATTERN_2:
2639 			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2640 			break;
2641 		case DP_TRAINING_PATTERN_3:
2642 			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2643 			break;
2644 		}
2645 		I915_WRITE(DP_TP_CTL(port), temp);
2646 
2647 	} else if ((IS_GEN7(dev) && port == PORT_A) ||
2648 		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
2649 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2650 
2651 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2652 		case DP_TRAINING_PATTERN_DISABLE:
2653 			*DP |= DP_LINK_TRAIN_OFF_CPT;
2654 			break;
2655 		case DP_TRAINING_PATTERN_1:
2656 			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2657 			break;
2658 		case DP_TRAINING_PATTERN_2:
2659 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2660 			break;
2661 		case DP_TRAINING_PATTERN_3:
2662 			DRM_ERROR("DP training pattern 3 not supported\n");
2663 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2664 			break;
2665 		}
2666 
2667 	} else {
2668 		if (IS_CHERRYVIEW(dev))
2669 			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
2670 		else
2671 			*DP &= ~DP_LINK_TRAIN_MASK;
2672 
2673 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2674 		case DP_TRAINING_PATTERN_DISABLE:
2675 			*DP |= DP_LINK_TRAIN_OFF;
2676 			break;
2677 		case DP_TRAINING_PATTERN_1:
2678 			*DP |= DP_LINK_TRAIN_PAT_1;
2679 			break;
2680 		case DP_TRAINING_PATTERN_2:
2681 			*DP |= DP_LINK_TRAIN_PAT_2;
2682 			break;
2683 		case DP_TRAINING_PATTERN_3:
2684 			if (IS_CHERRYVIEW(dev)) {
2685 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
2686 			} else {
2687 				DRM_ERROR("DP training pattern 3 not supported\n");
2688 				*DP |= DP_LINK_TRAIN_PAT_2;
2689 			}
2690 			break;
2691 		}
2692 	}
2693 }
2694 
2695 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2696 {
2697 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2698 	struct drm_i915_private *dev_priv = dev->dev_private;
2699 	struct intel_crtc *crtc =
2700 		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2701 
2702 	/* enable with pattern 1 (as per spec) */
2703 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2704 				 DP_TRAINING_PATTERN_1);
2705 
2706 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2707 	POSTING_READ(intel_dp->output_reg);
2708 
2709 	/*
2710 	 * Magic for VLV/CHV. We _must_ first set up the register
2711 	 * without actually enabling the port, and then do another
2712 	 * write to enable the port. Otherwise link training will
2713 	 * fail when the power sequencer is freshly used for this port.
2714 	 */
2715 	intel_dp->DP |= DP_PORT_EN;
2716 	if (crtc->config->has_audio)
2717 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2718 
2719 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2720 	POSTING_READ(intel_dp->output_reg);
2721 }
2722 
2723 static void intel_enable_dp(struct intel_encoder *encoder)
2724 {
2725 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2726 	struct drm_device *dev = encoder->base.dev;
2727 	struct drm_i915_private *dev_priv = dev->dev_private;
2728 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2729 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2730 	enum port port = dp_to_dig_port(intel_dp)->port;
2731 	enum i915_pipe pipe = crtc->pipe;
2732 
2733 	if (WARN_ON(dp_reg & DP_PORT_EN))
2734 		return;
2735 
2736 	pps_lock(intel_dp);
2737 
2738 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
2739 		vlv_init_panel_power_sequencer(intel_dp);
2740 
2741 	/*
2742 	 * We get an occasional spurious underrun between the port
2743 	 * enable and vdd enable, when enabling port A eDP.
2744 	 *
2745 	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2746 	 */
2747 	if (port == PORT_A)
2748 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2749 
2750 	intel_dp_enable_port(intel_dp);
2751 
2752 	if (port == PORT_A && IS_GEN5(dev_priv)) {
2753 		/*
2754 		 * Underrun reporting for the other pipe was disabled in
2755 		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2756 		 * enabled, so it's now safe to re-enable underrun reporting.
2757 		 */
2758 		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2759 		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2760 		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2761 	}
2762 
2763 	edp_panel_vdd_on(intel_dp);
2764 	edp_panel_on(intel_dp);
2765 	edp_panel_vdd_off(intel_dp, true);
2766 
2767 	if (port == PORT_A)
2768 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2769 
2770 	pps_unlock(intel_dp);
2771 
2772 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
2773 		unsigned int lane_mask = 0x0;
2774 
2775 		if (IS_CHERRYVIEW(dev))
2776 			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2777 
2778 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2779 				    lane_mask);
2780 	}
2781 
2782 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2783 	intel_dp_start_link_train(intel_dp);
2784 	intel_dp_stop_link_train(intel_dp);
2785 
2786 	if (crtc->config->has_audio) {
2787 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2788 				 pipe_name(pipe));
2789 		intel_audio_codec_enable(encoder);
2790 	}
2791 }
2792 
2793 static void g4x_enable_dp(struct intel_encoder *encoder)
2794 {
2795 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2796 
2797 	intel_enable_dp(encoder);
2798 	intel_edp_backlight_on(intel_dp);
2799 }
2800 
2801 static void vlv_enable_dp(struct intel_encoder *encoder)
2802 {
2803 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2804 
2805 	intel_edp_backlight_on(intel_dp);
2806 	intel_psr_enable(intel_dp);
2807 }
2808 
2809 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2810 {
2811 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2812 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2813 	enum port port = dp_to_dig_port(intel_dp)->port;
2814 	enum i915_pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2815 
2816 	intel_dp_prepare(encoder);
2817 
2818 	if (port == PORT_A && IS_GEN5(dev_priv)) {
2819 		/*
2820 		 * We get FIFO underruns on the other pipe when
2821 		 * enabling the CPU eDP PLL, and when enabling CPU
2822 		 * eDP port. We could potentially avoid the PLL
2823 		 * underrun with a vblank wait just prior to enabling
2824 		 * the PLL, but that doesn't appear to help the port
2825 		 * enable case. Just sweep it all under the rug.
2826 		 */
2827 		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2828 		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2829 	}
2830 
2831 	/* Only ilk+ has port A */
2832 	if (port == PORT_A)
2833 		ironlake_edp_pll_on(intel_dp);
2834 }
2835 
2836 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2837 {
2838 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2839 	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2840 	enum i915_pipe pipe = intel_dp->pps_pipe;
2841 	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2842 
2843 	edp_panel_vdd_off_sync(intel_dp);
2844 
2845 	/*
2846 	 * VLV seems to get confused when multiple power sequencers
2847 	 * have the same port selected (even if only one has power/vdd
2848 	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2849 	 * CHV, on the other hand, doesn't seem to mind having the same port
2850 	 * selected in multiple power sequencers, but let's always clear the
2851 	 * port select when logically disconnecting a power sequencer
2852 	 * from a port.
2853 	 */
2854 	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2855 		      pipe_name(pipe), port_name(intel_dig_port->port));
2856 	I915_WRITE(pp_on_reg, 0);
2857 	POSTING_READ(pp_on_reg);
2858 
2859 	intel_dp->pps_pipe = INVALID_PIPE;
2860 }
2861 
2862 static void vlv_steal_power_sequencer(struct drm_device *dev,
2863 				      enum i915_pipe pipe)
2864 {
2865 	struct drm_i915_private *dev_priv = dev->dev_private;
2866 	struct intel_encoder *encoder;
2867 
2868 	lockdep_assert_held(&dev_priv->pps_mutex);
2869 
2870 	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2871 		return;
2872 
2873 	for_each_intel_encoder(dev, encoder) {
2874 		struct intel_dp *intel_dp;
2875 		enum port port;
2876 
2877 		if (encoder->type != INTEL_OUTPUT_EDP)
2878 			continue;
2879 
2880 		intel_dp = enc_to_intel_dp(&encoder->base);
2881 		port = dp_to_dig_port(intel_dp)->port;
2882 
2883 		if (intel_dp->pps_pipe != pipe)
2884 			continue;
2885 
2886 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2887 			      pipe_name(pipe), port_name(port));
2888 
2889 		WARN(encoder->base.crtc,
2890 		     "stealing pipe %c power sequencer from active eDP port %c\n",
2891 		     pipe_name(pipe), port_name(port));
2892 
2893 		/* make sure vdd is off before we steal it */
2894 		vlv_detach_power_sequencer(intel_dp);
2895 	}
2896 }
2897 
2898 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2899 {
2900 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2901 	struct intel_encoder *encoder = &intel_dig_port->base;
2902 	struct drm_device *dev = encoder->base.dev;
2903 	struct drm_i915_private *dev_priv = dev->dev_private;
2904 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2905 
2906 	lockdep_assert_held(&dev_priv->pps_mutex);
2907 
2908 	if (!is_edp(intel_dp))
2909 		return;
2910 
2911 	if (intel_dp->pps_pipe == crtc->pipe)
2912 		return;
2913 
2914 	/*
2915 	 * If another power sequencer was being used on this
2916 	 * port previously make sure to turn off vdd there while
2917 	 * we still have control of it.
2918 	 */
2919 	if (intel_dp->pps_pipe != INVALID_PIPE)
2920 		vlv_detach_power_sequencer(intel_dp);
2921 
2922 	/*
2923 	 * We may be stealing the power
2924 	 * sequencer from another port.
2925 	 */
2926 	vlv_steal_power_sequencer(dev, crtc->pipe);
2927 
2928 	/* now it's all ours */
2929 	intel_dp->pps_pipe = crtc->pipe;
2930 
2931 	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2932 		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2933 
2934 	/* init power sequencer on this pipe and port */
2935 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2936 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2937 }
2938 
2939 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2940 {
2941 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2942 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2943 	struct drm_device *dev = encoder->base.dev;
2944 	struct drm_i915_private *dev_priv = dev->dev_private;
2945 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2946 	enum dpio_channel port = vlv_dport_to_channel(dport);
2947 	int pipe = intel_crtc->pipe;
2948 	u32 val;
2949 
2950 	mutex_lock(&dev_priv->sb_lock);
2951 
2952 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2953 	val = 0;
2954 	if (pipe)
2955 		val |= (1<<21);
2956 	else
2957 		val &= ~(1<<21);
2958 	val |= 0x001000c4;
2959 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2960 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2961 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2962 
2963 	mutex_unlock(&dev_priv->sb_lock);
2964 
2965 	intel_enable_dp(encoder);
2966 }
2967 
2968 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2969 {
2970 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2971 	struct drm_device *dev = encoder->base.dev;
2972 	struct drm_i915_private *dev_priv = dev->dev_private;
2973 	struct intel_crtc *intel_crtc =
2974 		to_intel_crtc(encoder->base.crtc);
2975 	enum dpio_channel port = vlv_dport_to_channel(dport);
2976 	int pipe = intel_crtc->pipe;
2977 
2978 	intel_dp_prepare(encoder);
2979 
2980 	/* Program Tx lane resets to default */
2981 	mutex_lock(&dev_priv->sb_lock);
2982 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2983 			 DPIO_PCS_TX_LANE2_RESET |
2984 			 DPIO_PCS_TX_LANE1_RESET);
2985 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2986 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2987 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2988 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2989 				 DPIO_PCS_CLK_SOFT_RESET);
2990 
2991 	/* Fix up inter-pair skew failure */
2992 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2993 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2994 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2995 	mutex_unlock(&dev_priv->sb_lock);
2996 }
2997 
2998 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2999 {
3000 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3001 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3002 	struct drm_device *dev = encoder->base.dev;
3003 	struct drm_i915_private *dev_priv = dev->dev_private;
3004 	struct intel_crtc *intel_crtc =
3005 		to_intel_crtc(encoder->base.crtc);
3006 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3007 	int pipe = intel_crtc->pipe;
3008 	int data, i, stagger;
3009 	u32 val;
3010 
3011 	mutex_lock(&dev_priv->sb_lock);
3012 
3013 	/* allow hardware to manage TX FIFO reset source */
3014 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3015 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3016 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3017 
3018 	if (intel_crtc->config->lane_count > 2) {
3019 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3020 		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3021 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3022 	}
3023 
3024 	/* Program Tx lane latency optimal setting */
3025 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3026 		/* Set the upar bit */
3027 		if (intel_crtc->config->lane_count == 1)
3028 			data = 0x0;
3029 		else
3030 			data = (i == 1) ? 0x0 : 0x1;
3031 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3032 				data << DPIO_UPAR_SHIFT);
3033 	}
3034 
3035 	/* Data lane stagger programming */
3036 	if (intel_crtc->config->port_clock > 270000)
3037 		stagger = 0x18;
3038 	else if (intel_crtc->config->port_clock > 135000)
3039 		stagger = 0xd;
3040 	else if (intel_crtc->config->port_clock > 67500)
3041 		stagger = 0x7;
3042 	else if (intel_crtc->config->port_clock > 33750)
3043 		stagger = 0x4;
3044 	else
3045 		stagger = 0x2;
3046 
3047 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3048 	val |= DPIO_TX2_STAGGER_MASK(0x1f);
3049 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3050 
3051 	if (intel_crtc->config->lane_count > 2) {
3052 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3053 		val |= DPIO_TX2_STAGGER_MASK(0x1f);
3054 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3055 	}
3056 
3057 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3058 		       DPIO_LANESTAGGER_STRAP(stagger) |
3059 		       DPIO_LANESTAGGER_STRAP_OVRD |
3060 		       DPIO_TX1_STAGGER_MASK(0x1f) |
3061 		       DPIO_TX1_STAGGER_MULT(6) |
3062 		       DPIO_TX2_STAGGER_MULT(0));
3063 
3064 	if (intel_crtc->config->lane_count > 2) {
3065 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3066 			       DPIO_LANESTAGGER_STRAP(stagger) |
3067 			       DPIO_LANESTAGGER_STRAP_OVRD |
3068 			       DPIO_TX1_STAGGER_MASK(0x1f) |
3069 			       DPIO_TX1_STAGGER_MULT(7) |
3070 			       DPIO_TX2_STAGGER_MULT(5));
3071 	}
3072 
3073 	/* Deassert data lane reset */
3074 	chv_data_lane_soft_reset(encoder, false);
3075 
3076 	mutex_unlock(&dev_priv->sb_lock);
3077 
3078 	intel_enable_dp(encoder);
3079 
3080 	/* Second common lane will stay alive on its own now */
3081 	if (dport->release_cl2_override) {
3082 		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3083 		dport->release_cl2_override = false;
3084 	}
3085 }
3086 
3087 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3088 {
3089 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3090 	struct drm_device *dev = encoder->base.dev;
3091 	struct drm_i915_private *dev_priv = dev->dev_private;
3092 	struct intel_crtc *intel_crtc =
3093 		to_intel_crtc(encoder->base.crtc);
3094 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3095 	enum i915_pipe pipe = intel_crtc->pipe;
3096 	unsigned int lane_mask =
3097 		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
3098 	u32 val;
3099 
3100 	intel_dp_prepare(encoder);
3101 
3102 	/*
3103 	 * Must trick the second common lane into life.
3104 	 * Otherwise we can't even access the PLL.
3105 	 */
3106 	if (ch == DPIO_CH0 && pipe == PIPE_B)
3107 		dport->release_cl2_override =
3108 			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3109 
3110 	chv_phy_powergate_lanes(encoder, true, lane_mask);
3111 
3112 	mutex_lock(&dev_priv->sb_lock);
3113 
3114 	/* Assert data lane reset */
3115 	chv_data_lane_soft_reset(encoder, true);
3116 
3117 	/* program left/right clock distribution */
3118 	if (pipe != PIPE_B) {
3119 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3120 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3121 		if (ch == DPIO_CH0)
3122 			val |= CHV_BUFLEFTENA1_FORCE;
3123 		if (ch == DPIO_CH1)
3124 			val |= CHV_BUFRIGHTENA1_FORCE;
3125 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3126 	} else {
3127 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3128 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3129 		if (ch == DPIO_CH0)
3130 			val |= CHV_BUFLEFTENA2_FORCE;
3131 		if (ch == DPIO_CH1)
3132 			val |= CHV_BUFRIGHTENA2_FORCE;
3133 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3134 	}
3135 
3136 	/* program clock channel usage */
3137 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3138 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3139 	if (pipe != PIPE_B)
3140 		val &= ~CHV_PCS_USEDCLKCHANNEL;
3141 	else
3142 		val |= CHV_PCS_USEDCLKCHANNEL;
3143 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3144 
3145 	if (intel_crtc->config->lane_count > 2) {
3146 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3147 		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3148 		if (pipe != PIPE_B)
3149 			val &= ~CHV_PCS_USEDCLKCHANNEL;
3150 		else
3151 			val |= CHV_PCS_USEDCLKCHANNEL;
3152 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3153 	}
3154 
3155 	/*
3156 	 * This is a bit weird since generally the CL
3157 	 * matches the pipe, but here we need to
3158 	 * pick the CL based on the port.
3159 	 */
3160 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3161 	if (pipe != PIPE_B)
3162 		val &= ~CHV_CMN_USEDCLKCHANNEL;
3163 	else
3164 		val |= CHV_CMN_USEDCLKCHANNEL;
3165 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3166 
3167 	mutex_unlock(&dev_priv->sb_lock);
3168 }
3169 
3170 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3171 {
3172 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3173 	enum i915_pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3174 	u32 val;
3175 
3176 	mutex_lock(&dev_priv->sb_lock);
3177 
3178 	/* disable left/right clock distribution */
3179 	if (pipe != PIPE_B) {
3180 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3181 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3182 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3183 	} else {
3184 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3185 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3186 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3187 	}
3188 
3189 	mutex_unlock(&dev_priv->sb_lock);
3190 
3191 	/*
3192 	 * Leave the power down bit cleared for at least one
3193 	 * lane so that chv_phy_powergate_ch() will power
3194 	 * on something when the channel is otherwise unused.
3195 	 * When the port is off and the override is removed
3196 	 * the lanes power down anyway, so otherwise it doesn't
3197 	 * really matter what the state of power down bits is
3198 	 * after this.
3199 	 */
3200 	chv_phy_powergate_lanes(encoder, false, 0x0);
3201 }
3202 
3203 /*
3204  * Native read with retry for link status and receiver capability reads for
3205  * cases where the sink may still be asleep.
3206  *
3207  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3208  * supposed to retry 3 times per the spec.
3209  */
3210 static ssize_t
3211 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3212 			void *buffer, size_t size)
3213 {
3214 	ssize_t ret;
3215 	int i;
3216 
3217 	/*
3218 	 * Sometimes we just get the same incorrect byte repeated
3219 	 * over the entire buffer. Doing just one throw-away read
3220 	 * initially seems to "solve" it.
3221 	 */
3222 	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3223 
3224 	for (i = 0; i < 3; i++) {
3225 		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3226 		if (ret == size)
3227 			return ret;
3228 		msleep(1);
3229 	}
3230 
3231 	return ret;
3232 }
3233 
3234 /*
3235  * Fetch AUX CH registers 0x202 - 0x207 which contain
3236  * link status information
3237  */
3238 bool
3239 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3240 {
3241 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3242 				       DP_LANE0_1_STATUS,
3243 				       link_status,
3244 				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3245 }
3246 
3247 /* These are source-specific values. */
3248 uint8_t
3249 intel_dp_voltage_max(struct intel_dp *intel_dp)
3250 {
3251 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3252 	struct drm_i915_private *dev_priv = dev->dev_private;
3253 	enum port port = dp_to_dig_port(intel_dp)->port;
3254 
3255 	if (IS_BROXTON(dev))
3256 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3257 	else if (INTEL_INFO(dev)->gen >= 9) {
3258 		if (dev_priv->edp_low_vswing && port == PORT_A)
3259 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3260 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3261 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
3262 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3263 	else if (IS_GEN7(dev) && port == PORT_A)
3264 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3265 	else if (HAS_PCH_CPT(dev) && port != PORT_A)
3266 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3267 	else
3268 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3269 }
3270 
3271 uint8_t
3272 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3273 {
3274 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3275 	enum port port = dp_to_dig_port(intel_dp)->port;
3276 
3277 	if (INTEL_INFO(dev)->gen >= 9) {
3278 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3279 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3280 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3281 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3282 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3283 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3284 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3285 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3286 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3287 		default:
3288 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3289 		}
3290 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3291 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3292 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3293 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3294 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3295 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3296 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3297 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3298 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3299 		default:
3300 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3301 		}
3302 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3303 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3304 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3305 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3306 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3307 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3308 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3309 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3310 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3311 		default:
3312 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3313 		}
3314 	} else if (IS_GEN7(dev) && port == PORT_A) {
3315 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3316 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3317 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3318 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3319 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3320 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3321 		default:
3322 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3323 		}
3324 	} else {
3325 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3326 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3327 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3328 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3329 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3330 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3331 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3332 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3333 		default:
3334 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3335 		}
3336 	}
3337 }
3338 
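/*
 * Condensed sketch (illustrative only, not the actual link-training
 * code) of how the two limits above are applied when building a
 * train_set entry; "requested_vswing"/"requested_preemph" are
 * placeholder names:
 *
 *	uint8_t v = min(requested_vswing, intel_dp_voltage_max(intel_dp));
 *	uint8_t p = min(requested_preemph,
 *			intel_dp_pre_emphasis_max(intel_dp, v));
 *	intel_dp->train_set[0] = v | p;
 */
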
3339 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3340 {
3341 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3342 	struct drm_i915_private *dev_priv = dev->dev_private;
3343 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3344 	struct intel_crtc *intel_crtc =
3345 		to_intel_crtc(dport->base.base.crtc);
3346 	unsigned long demph_reg_value, preemph_reg_value,
3347 		uniqtranscale_reg_value;
3348 	uint8_t train_set = intel_dp->train_set[0];
3349 	enum dpio_channel port = vlv_dport_to_channel(dport);
3350 	int pipe = intel_crtc->pipe;
3351 
3352 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3353 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3354 		preemph_reg_value = 0x0004000;
3355 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3356 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3357 			demph_reg_value = 0x2B405555;
3358 			uniqtranscale_reg_value = 0x552AB83A;
3359 			break;
3360 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3361 			demph_reg_value = 0x2B404040;
3362 			uniqtranscale_reg_value = 0x5548B83A;
3363 			break;
3364 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3365 			demph_reg_value = 0x2B245555;
3366 			uniqtranscale_reg_value = 0x5560B83A;
3367 			break;
3368 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3369 			demph_reg_value = 0x2B405555;
3370 			uniqtranscale_reg_value = 0x5598DA3A;
3371 			break;
3372 		default:
3373 			return 0;
3374 		}
3375 		break;
3376 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3377 		preemph_reg_value = 0x0002000;
3378 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3379 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3380 			demph_reg_value = 0x2B404040;
3381 			uniqtranscale_reg_value = 0x5552B83A;
3382 			break;
3383 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3384 			demph_reg_value = 0x2B404848;
3385 			uniqtranscale_reg_value = 0x5580B83A;
3386 			break;
3387 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3388 			demph_reg_value = 0x2B404040;
3389 			uniqtranscale_reg_value = 0x55ADDA3A;
3390 			break;
3391 		default:
3392 			return 0;
3393 		}
3394 		break;
3395 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3396 		preemph_reg_value = 0x0000000;
3397 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3398 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3399 			demph_reg_value = 0x2B305555;
3400 			uniqtranscale_reg_value = 0x5570B83A;
3401 			break;
3402 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3403 			demph_reg_value = 0x2B2B4040;
3404 			uniqtranscale_reg_value = 0x55ADDA3A;
3405 			break;
3406 		default:
3407 			return 0;
3408 		}
3409 		break;
3410 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3411 		preemph_reg_value = 0x0006000;
3412 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3413 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3414 			demph_reg_value = 0x1B405555;
3415 			uniqtranscale_reg_value = 0x55ADDA3A;
3416 			break;
3417 		default:
3418 			return 0;
3419 		}
3420 		break;
3421 	default:
3422 		return 0;
3423 	}
3424 
3425 	mutex_lock(&dev_priv->sb_lock);
3426 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3427 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3428 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3429 			 uniqtranscale_reg_value);
3430 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3431 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3432 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3433 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3434 	mutex_unlock(&dev_priv->sb_lock);
3435 
3436 	return 0;
3437 }
3438 
3439 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3440 {
3441 	return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3442 		(train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3443 }
3444 
3445 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3446 {
3447 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3448 	struct drm_i915_private *dev_priv = dev->dev_private;
3449 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3450 	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3451 	u32 deemph_reg_value, margin_reg_value, val;
3452 	uint8_t train_set = intel_dp->train_set[0];
3453 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3454 	enum i915_pipe pipe = intel_crtc->pipe;
3455 	int i;
3456 
3457 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3458 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3459 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3460 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3461 			deemph_reg_value = 128;
3462 			margin_reg_value = 52;
3463 			break;
3464 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3465 			deemph_reg_value = 128;
3466 			margin_reg_value = 77;
3467 			break;
3468 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3469 			deemph_reg_value = 128;
3470 			margin_reg_value = 102;
3471 			break;
3472 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3473 			deemph_reg_value = 128;
3474 			margin_reg_value = 154;
3475 			/* FIXME extra to set for 1200 */
3476 			break;
3477 		default:
3478 			return 0;
3479 		}
3480 		break;
3481 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3482 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3483 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3484 			deemph_reg_value = 85;
3485 			margin_reg_value = 78;
3486 			break;
3487 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3488 			deemph_reg_value = 85;
3489 			margin_reg_value = 116;
3490 			break;
3491 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3492 			deemph_reg_value = 85;
3493 			margin_reg_value = 154;
3494 			break;
3495 		default:
3496 			return 0;
3497 		}
3498 		break;
3499 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3500 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3501 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3502 			deemph_reg_value = 64;
3503 			margin_reg_value = 104;
3504 			break;
3505 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3506 			deemph_reg_value = 64;
3507 			margin_reg_value = 154;
3508 			break;
3509 		default:
3510 			return 0;
3511 		}
3512 		break;
3513 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3514 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3515 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3516 			deemph_reg_value = 43;
3517 			margin_reg_value = 154;
3518 			break;
3519 		default:
3520 			return 0;
3521 		}
3522 		break;
3523 	default:
3524 		return 0;
3525 	}
3526 
3527 	mutex_lock(&dev_priv->sb_lock);
3528 
3529 	/* Clear calc init */
3530 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3531 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3532 	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3533 	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3534 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3535 
3536 	if (intel_crtc->config->lane_count > 2) {
3537 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3538 		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3539 		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3540 		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3541 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3542 	}
3543 
3544 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3545 	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3546 	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3547 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3548 
3549 	if (intel_crtc->config->lane_count > 2) {
3550 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3551 		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3552 		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3553 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3554 	}
3555 
3556 	/* Program swing deemph */
3557 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3558 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3559 		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3560 		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3561 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3562 	}
3563 
3564 	/* Program swing margin */
3565 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3566 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3567 
3568 		val &= ~DPIO_SWING_MARGIN000_MASK;
3569 		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3570 
3571 		/*
3572 		 * Supposedly this value shouldn't matter when unique transition
3573 		 * scale is disabled, but in fact it does matter. Let's just
3574 		 * always program the same value and hope it's OK.
3575 		 */
3576 		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3577 		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3578 
3579 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3580 	}
3581 
3582 	/*
3583 	 * The documentation says to set bit 27 for ch0 and bit 26
3584 	 * for ch1, which might be a typo in the doc.
3585 	 * For now, set bit 27 for both ch0 and ch1 when selecting
3586 	 * the unique transition scale.
3587 	 */
3588 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3589 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3590 		if (chv_need_uniq_trans_scale(train_set))
3591 			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3592 		else
3593 			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3594 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3595 	}
3596 
3597 	/* Start swing calculation */
3598 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3599 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3600 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3601 
3602 	if (intel_crtc->config->lane_count > 2) {
3603 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3604 		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3605 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3606 	}
3607 
3608 	mutex_unlock(&dev_priv->sb_lock);
3609 
3610 	return 0;
3611 }
3612 
3613 static uint32_t
3614 gen4_signal_levels(uint8_t train_set)
3615 {
3616 	uint32_t	signal_levels = 0;
3617 
3618 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3619 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3620 	default:
3621 		signal_levels |= DP_VOLTAGE_0_4;
3622 		break;
3623 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3624 		signal_levels |= DP_VOLTAGE_0_6;
3625 		break;
3626 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3627 		signal_levels |= DP_VOLTAGE_0_8;
3628 		break;
3629 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3630 		signal_levels |= DP_VOLTAGE_1_2;
3631 		break;
3632 	}
3633 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3634 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3635 	default:
3636 		signal_levels |= DP_PRE_EMPHASIS_0;
3637 		break;
3638 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3639 		signal_levels |= DP_PRE_EMPHASIS_3_5;
3640 		break;
3641 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3642 		signal_levels |= DP_PRE_EMPHASIS_6;
3643 		break;
3644 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3645 		signal_levels |= DP_PRE_EMPHASIS_9_5;
3646 		break;
3647 	}
3648 	return signal_levels;
3649 }
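/*
 * Example (illustrative only): a train_set asking for voltage swing level 1
 * with pre-emphasis level 2 maps to DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_6,
 * i.e. 0.6 V swing with 6 dB pre-emphasis in the DP port register.
 */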
3650 
3651 /* Gen6's DP voltage swing and pre-emphasis control */
3652 static uint32_t
3653 gen6_edp_signal_levels(uint8_t train_set)
3654 {
3655 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3656 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3657 	switch (signal_levels) {
3658 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3659 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3660 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3661 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3662 		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3663 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3664 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3665 		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3666 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3667 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3668 		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3669 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3670 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3671 		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3672 	default:
3673 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3674 			      "0x%x\n", signal_levels);
3675 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3676 	}
3677 }
3678 
3679 /* Gen7's DP voltage swing and pre-emphasis control */
3680 static uint32_t
3681 gen7_edp_signal_levels(uint8_t train_set)
3682 {
3683 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3684 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3685 	switch (signal_levels) {
3686 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3687 		return EDP_LINK_TRAIN_400MV_0DB_IVB;
3688 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3689 		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3690 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3691 		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3692 
3693 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3694 		return EDP_LINK_TRAIN_600MV_0DB_IVB;
3695 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3696 		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3697 
3698 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3699 		return EDP_LINK_TRAIN_800MV_0DB_IVB;
3700 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3701 		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3702 
3703 	default:
3704 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3705 			      "0x%x\n", signal_levels);
3706 		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3707 	}
3708 }
3709 
3710 void
3711 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3712 {
3713 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3714 	enum port port = intel_dig_port->port;
3715 	struct drm_device *dev = intel_dig_port->base.base.dev;
3716 	struct drm_i915_private *dev_priv = to_i915(dev);
3717 	uint32_t signal_levels, mask = 0;
3718 	uint8_t train_set = intel_dp->train_set[0];
3719 
3720 	if (HAS_DDI(dev)) {
3721 		signal_levels = ddi_signal_levels(intel_dp);
3722 
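		/*
		 * On BXT the vswing/pre-emphasis values are programmed
		 * entirely through the PHY registers in ddi_signal_levels()
		 * above, so nothing is merged into DDI_BUF_CTL here; hence
		 * the zero value and the empty mask.
		 */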
3723 		if (IS_BROXTON(dev))
3724 			signal_levels = 0;
3725 		else
3726 			mask = DDI_BUF_EMP_MASK;
3727 	} else if (IS_CHERRYVIEW(dev)) {
3728 		signal_levels = chv_signal_levels(intel_dp);
3729 	} else if (IS_VALLEYVIEW(dev)) {
3730 		signal_levels = vlv_signal_levels(intel_dp);
3731 	} else if (IS_GEN7(dev) && port == PORT_A) {
3732 		signal_levels = gen7_edp_signal_levels(train_set);
3733 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3734 	} else if (IS_GEN6(dev) && port == PORT_A) {
3735 		signal_levels = gen6_edp_signal_levels(train_set);
3736 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3737 	} else {
3738 		signal_levels = gen4_signal_levels(train_set);
3739 		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3740 	}
3741 
3742 	if (mask)
3743 		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3744 
3745 	DRM_DEBUG_KMS("Using vswing level %d\n",
3746 		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3747 	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3748 		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3749 			DP_TRAIN_PRE_EMPHASIS_SHIFT);
3750 
3751 	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3752 
3753 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3754 	POSTING_READ(intel_dp->output_reg);
3755 }
3756 
3757 void
3758 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3759 				       uint8_t dp_train_pat)
3760 {
3761 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3762 	struct drm_i915_private *dev_priv =
3763 		to_i915(intel_dig_port->base.base.dev);
3764 
3765 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3766 
3767 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3768 	POSTING_READ(intel_dp->output_reg);
3769 }
3770 
3771 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3772 {
3773 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3774 	struct drm_device *dev = intel_dig_port->base.base.dev;
3775 	struct drm_i915_private *dev_priv = dev->dev_private;
3776 	enum port port = intel_dig_port->port;
3777 	uint32_t val;
3778 
3779 	if (!HAS_DDI(dev))
3780 		return;
3781 
3782 	val = I915_READ(DP_TP_CTL(port));
3783 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3784 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3785 	I915_WRITE(DP_TP_CTL(port), val);
3786 
3787 	/*
3788 	 * On PORT_A we can only have eDP in SST mode. There, the only reason
3789 	 * we need to set the idle transmission mode is to work around a HW
3790 	 * issue where we enable the pipe while not in idle link-training
3791 	 * mode. In this case there is a requirement to wait for a minimum
3792 	 * number of idle patterns to be sent.
3793 	 */
3794 	if (port == PORT_A)
3795 		return;
3796 
3797 	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3798 		     1))
3799 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3800 }
3801 
3802 static void
3803 intel_dp_link_down(struct intel_dp *intel_dp)
3804 {
3805 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3806 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3807 	enum port port = intel_dig_port->port;
3808 	struct drm_device *dev = intel_dig_port->base.base.dev;
3809 	struct drm_i915_private *dev_priv = dev->dev_private;
3810 	uint32_t DP = intel_dp->DP;
3811 
3812 	if (WARN_ON(HAS_DDI(dev)))
3813 		return;
3814 
3815 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3816 		return;
3817 
3818 	DRM_DEBUG_KMS("\n");
3819 
3820 	if ((IS_GEN7(dev) && port == PORT_A) ||
3821 	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
3822 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3823 		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3824 	} else {
3825 		if (IS_CHERRYVIEW(dev))
3826 			DP &= ~DP_LINK_TRAIN_MASK_CHV;
3827 		else
3828 			DP &= ~DP_LINK_TRAIN_MASK;
3829 		DP |= DP_LINK_TRAIN_PAT_IDLE;
3830 	}
3831 	I915_WRITE(intel_dp->output_reg, DP);
3832 	POSTING_READ(intel_dp->output_reg);
3833 
3834 	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3835 	I915_WRITE(intel_dp->output_reg, DP);
3836 	POSTING_READ(intel_dp->output_reg);
3837 
3838 	/*
3839 	 * HW workaround for IBX, we need to move the port
3840 	 * to transcoder A after disabling it to allow the
3841 	 * matching HDMI port to be enabled on transcoder A.
3842 	 */
3843 	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3844 		/*
3845 		 * We get CPU/PCH FIFO underruns on the other pipe when
3846 		 * doing the workaround. Sweep them under the rug.
3847 		 */
3848 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3849 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3850 
3851 		/* always enable with pattern 1 (as per spec) */
3852 		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3853 		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3854 		I915_WRITE(intel_dp->output_reg, DP);
3855 		POSTING_READ(intel_dp->output_reg);
3856 
3857 		DP &= ~DP_PORT_EN;
3858 		I915_WRITE(intel_dp->output_reg, DP);
3859 		POSTING_READ(intel_dp->output_reg);
3860 
3861 		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3862 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3863 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3864 	}
3865 
3866 	msleep(intel_dp->panel_power_down_delay);
3867 
3868 	intel_dp->DP = DP;
3869 }
3870 
3871 static bool
3872 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3873 {
3874 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3875 	struct drm_device *dev = dig_port->base.base.dev;
3876 	struct drm_i915_private *dev_priv = dev->dev_private;
3877 	uint8_t rev;
3878 
3879 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3880 				    sizeof(intel_dp->dpcd)) < 0)
3881 		return false; /* aux transfer failed */
3882 
3883 #ifdef __DragonFly__
3884 	char dpcd_hex_dump[DP_RECEIVER_CAP_SIZE * 3];
3885 	DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(intel_dp->dpcd, sizeof(intel_dp->dpcd),
3886 		      dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
3887 #else
3888 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3889 #endif
3890 
3891 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3892 		return false; /* DPCD not present */
3893 
3894 	/* Check if the panel supports PSR */
3895 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3896 	if (is_edp(intel_dp)) {
3897 		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3898 					intel_dp->psr_dpcd,
3899 					sizeof(intel_dp->psr_dpcd));
3900 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3901 			dev_priv->psr.sink_support = true;
3902 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3903 		}
3904 
3905 		if (INTEL_INFO(dev)->gen >= 9 &&
3906 			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3907 			uint8_t frame_sync_cap;
3908 
3909 			dev_priv->psr.sink_support = true;
3910 			intel_dp_dpcd_read_wake(&intel_dp->aux,
3911 					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3912 					&frame_sync_cap, 1);
3913 			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3914 			/* PSR2 needs frame sync as well */
3915 			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3916 			DRM_DEBUG_KMS("PSR2 %s on sink\n",
3917 				dev_priv->psr.psr2_support ? "supported" : "not supported");
3918 		}
3919 	}
3920 
3921 	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3922 		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
3923 		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3924 
3925 	/* Intermediate frequency support */
3926 	if (is_edp(intel_dp) &&
3927 	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3928 	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3929 	    (rev >= 0x03)) { /* eDP v1.4 or higher */
3930 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3931 		int i;
3932 
3933 		intel_dp_dpcd_read_wake(&intel_dp->aux,
3934 				DP_SUPPORTED_LINK_RATES,
3935 				sink_rates,
3936 				sizeof(sink_rates));
3937 
3938 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3939 			int val = le16_to_cpu(sink_rates[i]);
3940 
3941 			if (val == 0)
3942 				break;
3943 
3944 			/* Value read is in units of 200 kHz, while the drm clock is stored in deca-kHz */
3945 			intel_dp->sink_rates[i] = (val * 200) / 10;
3946 		}
3947 		intel_dp->num_sink_rates = i;
3948 	}
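	/*
	 * Worked example (sink value assumed for illustration): an entry of
	 * 8100 advertises 8100 * 200 kHz = 1.62 GHz, stored above as
	 * (8100 * 200) / 10 = 162000 deca-kHz, matching the 162000 link
	 * clock used elsewhere in this file.
	 */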
3949 
3950 	intel_dp_print_rates(intel_dp);
3951 
3952 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3953 	      DP_DWN_STRM_PORT_PRESENT))
3954 		return true; /* native DP sink */
3955 
3956 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3957 		return true; /* no per-port downstream info */
3958 
3959 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3960 				    intel_dp->downstream_ports,
3961 				    DP_MAX_DOWNSTREAM_PORTS) < 0)
3962 		return false; /* downstream port status fetch failed */
3963 
3964 	return true;
3965 }
3966 
3967 static void
3968 intel_dp_probe_oui(struct intel_dp *intel_dp)
3969 {
3970 	u8 buf[3];
3971 
3972 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3973 		return;
3974 
3975 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3976 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3977 			      buf[0], buf[1], buf[2]);
3978 
3979 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3980 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3981 			      buf[0], buf[1], buf[2]);
3982 }
3983 
3984 static bool
3985 intel_dp_probe_mst(struct intel_dp *intel_dp)
3986 {
3987 	u8 buf[1];
3988 
3989 	if (!intel_dp->can_mst)
3990 		return false;
3991 
3992 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3993 		return false;
3994 
3995 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3996 		if (buf[0] & DP_MST_CAP) {
3997 			DRM_DEBUG_KMS("Sink is MST capable\n");
3998 			intel_dp->is_mst = true;
3999 		} else {
4000 			DRM_DEBUG_KMS("Sink is not MST capable\n");
4001 			intel_dp->is_mst = false;
4002 		}
4003 	}
4004 
4005 #if 0
4006 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4007 	return intel_dp->is_mst;
4008 #else
4009 	return false;
4010 #endif
4011 }
4012 
4013 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4014 {
4015 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4016 	struct drm_device *dev = dig_port->base.base.dev;
4017 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4018 	u8 buf;
4019 	int ret = 0;
4020 	int count = 0;
4021 	int attempts = 10;
4022 
4023 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4024 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4025 		ret = -EIO;
4026 		goto out;
4027 	}
4028 
4029 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4030 			       buf & ~DP_TEST_SINK_START) < 0) {
4031 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4032 		ret = -EIO;
4033 		goto out;
4034 	}
4035 
4036 	do {
4037 		intel_wait_for_vblank(dev, intel_crtc->pipe);
4038 
4039 		if (drm_dp_dpcd_readb(&intel_dp->aux,
4040 				      DP_TEST_SINK_MISC, &buf) < 0) {
4041 			ret = -EIO;
4042 			goto out;
4043 		}
4044 		count = buf & DP_TEST_COUNT_MASK;
4045 	} while (--attempts && count);
4046 
4047 	if (attempts == 0) {
4048 		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
4049 		ret = -ETIMEDOUT;
4050 	}
4051 
4052  out:
4053 	hsw_enable_ips(intel_crtc);
4054 	return ret;
4055 }
4056 
4057 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4058 {
4059 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4060 	struct drm_device *dev = dig_port->base.base.dev;
4061 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4062 	u8 buf;
4063 	int ret;
4064 
4065 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4066 		return -EIO;
4067 
4068 	if (!(buf & DP_TEST_CRC_SUPPORTED))
4069 		return -ENOTTY;
4070 
4071 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4072 		return -EIO;
4073 
4074 	if (buf & DP_TEST_SINK_START) {
4075 		ret = intel_dp_sink_crc_stop(intel_dp);
4076 		if (ret)
4077 			return ret;
4078 	}
4079 
4080 	hsw_disable_ips(intel_crtc);
4081 
4082 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4083 			       buf | DP_TEST_SINK_START) < 0) {
4084 		hsw_enable_ips(intel_crtc);
4085 		return -EIO;
4086 	}
4087 
4088 	intel_wait_for_vblank(dev, intel_crtc->pipe);
4089 	return 0;
4090 }
4091 
4092 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4093 {
4094 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4095 	struct drm_device *dev = dig_port->base.base.dev;
4096 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4097 	u8 buf;
4098 	int count, ret;
4099 	int attempts = 6;
4100 
4101 	ret = intel_dp_sink_crc_start(intel_dp);
4102 	if (ret)
4103 		return ret;
4104 
4105 	do {
4106 		intel_wait_for_vblank(dev, intel_crtc->pipe);
4107 
4108 		if (drm_dp_dpcd_readb(&intel_dp->aux,
4109 				      DP_TEST_SINK_MISC, &buf) < 0) {
4110 			ret = -EIO;
4111 			goto stop;
4112 		}
4113 		count = buf & DP_TEST_COUNT_MASK;
4114 
4115 	} while (--attempts && count == 0);
4116 
4117 	if (attempts == 0) {
4118 		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4119 		ret = -ETIMEDOUT;
4120 		goto stop;
4121 	}
4122 
4123 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4124 		ret = -EIO;
4125 		goto stop;
4126 	}
4127 
4128 stop:
4129 	intel_dp_sink_crc_stop(intel_dp);
4130 	return ret;
4131 }
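/*
 * Usage sketch (hypothetical caller; the real consumer is a debugfs-style
 * test hook, so the names below are illustrative):
 *
 *	u8 crc[6];
 *
 *	if (intel_dp_sink_crc(intel_dp, crc) == 0)
 *		DRM_DEBUG_KMS("sink CRC %02x%02x%02x%02x%02x%02x\n",
 *			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
 */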
4132 
4133 static bool
4134 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4135 {
4136 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
4137 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
4138 				       sink_irq_vector, 1) == 1;
4139 }
4140 
4141 static bool
4142 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4143 {
4144 	int ret;
4145 
4146 	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4147 					     DP_SINK_COUNT_ESI,
4148 					     sink_irq_vector, 14);
4149 	if (ret != 14)
4150 		return false;
4151 
4152 	return true;
4153 }
4154 
4155 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4156 {
4157 	uint8_t test_result = DP_TEST_ACK;
4158 	return test_result;
4159 }
4160 
4161 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4162 {
4163 	uint8_t test_result = DP_TEST_NAK;
4164 	return test_result;
4165 }
4166 
4167 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4168 {
4169 	uint8_t test_result = DP_TEST_NAK;
4170 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4171 	struct drm_connector *connector = &intel_connector->base;
4172 
4173 	if (intel_connector->detect_edid == NULL ||
4174 	    connector->edid_corrupt ||
4175 	    intel_dp->aux.i2c_defer_count > 6) {
4176 		/* Check EDID read for NACKs, DEFERs and corruption
4177 		 * (DP CTS 1.2 Core r1.1)
4178 		 *    4.2.2.4 : Failed EDID read, I2C_NAK
4179 		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
4180 		 *    4.2.2.6 : EDID corruption detected
4181 		 * Use failsafe mode for all cases
4182 		 */
4183 		if (intel_dp->aux.i2c_nack_count > 0 ||
4184 			intel_dp->aux.i2c_defer_count > 0)
4185 			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4186 				      intel_dp->aux.i2c_nack_count,
4187 				      intel_dp->aux.i2c_defer_count);
4188 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4189 	} else {
4190 		struct edid *block = intel_connector->detect_edid;
4191 
4192 		/* We have to write the checksum
4193 		 * of the last block read
4194 		 */
4195 		block += intel_connector->detect_edid->extensions;
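		/*
		 * 'block' is a struct edid pointer, so the addition above
		 * steps in whole 128-byte EDID blocks; with N extensions it
		 * lands on the last extension block, whose checksum is the
		 * one reported.
		 */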
4196 
4197 		if (!drm_dp_dpcd_write(&intel_dp->aux,
4198 					DP_TEST_EDID_CHECKSUM,
4199 					&block->checksum,
4200 					1))
4201 			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4202 
4203 		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4204 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4205 	}
4206 
4207 	/* Set test active flag here so userspace doesn't interrupt things */
4208 	intel_dp->compliance_test_active = 1;
4209 
4210 	return test_result;
4211 }
4212 
4213 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4214 {
4215 	uint8_t test_result = DP_TEST_NAK;
4216 	return test_result;
4217 }
4218 
4219 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4220 {
4221 	uint8_t response = DP_TEST_NAK;
4222 	uint8_t rxdata = 0;
4223 	int status = 0;
4224 
4225 	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4226 	if (status <= 0) {
4227 		DRM_DEBUG_KMS("Could not read test request from sink\n");
4228 		goto update_status;
4229 	}
4230 
4231 	switch (rxdata) {
4232 	case DP_TEST_LINK_TRAINING:
4233 		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4234 		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4235 		response = intel_dp_autotest_link_training(intel_dp);
4236 		break;
4237 	case DP_TEST_LINK_VIDEO_PATTERN:
4238 		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4239 		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4240 		response = intel_dp_autotest_video_pattern(intel_dp);
4241 		break;
4242 	case DP_TEST_LINK_EDID_READ:
4243 		DRM_DEBUG_KMS("EDID test requested\n");
4244 		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4245 		response = intel_dp_autotest_edid(intel_dp);
4246 		break;
4247 	case DP_TEST_LINK_PHY_TEST_PATTERN:
4248 		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4249 		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4250 		response = intel_dp_autotest_phy_pattern(intel_dp);
4251 		break;
4252 	default:
4253 		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4254 		break;
4255 	}
4256 
4257 update_status:
4258 	status = drm_dp_dpcd_write(&intel_dp->aux,
4259 				   DP_TEST_RESPONSE,
4260 				   &response, 1);
4261 	if (status <= 0)
4262 		DRM_DEBUG_KMS("Could not write test response to sink\n");
4263 }
4264 
4265 static int
4266 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4267 {
4268 	bool bret;
4269 
4270 	if (intel_dp->is_mst) {
4271 		u8 esi[16] = { 0 };
4272 		int ret = 0;
4273 		int retry;
4274 		bool handled;
4275 		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4276 go_again:
4277 		if (bret == true) {
4278 		if (bret) {
4279 			/* check link status - esi[10] = 0x200c */
4280 			/* check link status - esi[10] is DPCD 0x200c (lane status; esi[] starts at 0x2002) */
4281 			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4282 				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4283 				intel_dp_start_link_train(intel_dp);
4284 				intel_dp_stop_link_train(intel_dp);
4285 			}
4286 
4287 			DRM_DEBUG_KMS("got esi %3ph\n", esi);
4288 			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4289 
4290 			if (handled) {
4291 				for (retry = 0; retry < 3; retry++) {
4292 					int wret;
4293 					wret = drm_dp_dpcd_write(&intel_dp->aux,
4294 								 DP_SINK_COUNT_ESI+1,
4295 								 &esi[1], 3);
4296 					if (wret == 3) {
4297 						break;
4298 					}
4299 				}
4300 
4301 				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4302 				if (bret) {
4303 					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4304 					goto go_again;
4305 				}
4306 			} else
4307 				ret = 0;
4308 
4309 			return ret;
4310 		} else {
4311 			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4312 			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4313 			intel_dp->is_mst = false;
4314 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4315 			/* send a hotplug event */
4316 			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4317 		}
4318 	}
4319 	return -EINVAL;
4320 }
4321 
4322 /*
4323  * According to DP spec
4324  * 5.1.2:
4325  *  1. Read DPCD
4326  *  2. Configure link according to Receiver Capabilities
4327  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4328  *  4. Check link status on receipt of hot-plug interrupt
4329  */
4330 static void
4331 intel_dp_check_link_status(struct intel_dp *intel_dp)
4332 {
4333 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4334 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4335 	u8 sink_irq_vector;
4336 	u8 link_status[DP_LINK_STATUS_SIZE];
4337 
4338 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4339 
4340 	/*
4341 	 * Clear the compliance test variables so that fresh values can be
4342 	 * captured for the next automated test request.
4343 	 */
4344 	intel_dp->compliance_test_active = 0;
4345 	intel_dp->compliance_test_type = 0;
4346 	intel_dp->compliance_test_data = 0;
4347 
4348 	if (!intel_encoder->base.crtc)
4349 		return;
4350 
4351 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4352 		return;
4353 
4354 	/* Try to read receiver status if the link appears to be up */
4355 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
4356 		return;
4357 	}
4358 
4359 	/* Now read the DPCD to see if it's actually running */
4360 	if (!intel_dp_get_dpcd(intel_dp)) {
4361 		return;
4362 	}
4363 
4364 	/* Try to read the source of the interrupt */
4365 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4366 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4367 		/* Clear interrupt source */
4368 		drm_dp_dpcd_writeb(&intel_dp->aux,
4369 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4370 				   sink_irq_vector);
4371 
4372 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4373 			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4374 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4375 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4376 	}
4377 
4378 	/* if link training is requested we should perform it always */
4379 	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4380 		(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4381 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4382 			      intel_encoder->base.name);
4383 		intel_dp_start_link_train(intel_dp);
4384 		intel_dp_stop_link_train(intel_dp);
4385 	}
4386 }
4387 
4388 /* XXX this is probably wrong for multiple downstream ports */
4389 static enum drm_connector_status
4390 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4391 {
4392 	uint8_t *dpcd = intel_dp->dpcd;
4393 	uint8_t type;
4394 
4395 	if (!intel_dp_get_dpcd(intel_dp))
4396 		return connector_status_disconnected;
4397 
4398 	/* if there's no downstream port, we're done */
4399 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4400 		return connector_status_connected;
4401 
4402 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4403 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4404 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4405 		uint8_t reg;
4406 
4407 		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4408 					    &reg, 1) < 0)
4409 			return connector_status_unknown;
4410 
4411 		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4412 					      : connector_status_disconnected;
4413 	}
4414 
4415 	/* If no HPD, poke DDC gently */
4416 	if (drm_probe_ddc(&intel_dp->aux.ddc))
4417 		return connector_status_connected;
4418 
4419 	/* Well we tried, say unknown for unreliable port types */
4420 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4421 		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4422 		if (type == DP_DS_PORT_TYPE_VGA ||
4423 		    type == DP_DS_PORT_TYPE_NON_EDID)
4424 			return connector_status_unknown;
4425 	} else {
4426 		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4427 			DP_DWN_STRM_PORT_TYPE_MASK;
4428 		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4429 		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
4430 			return connector_status_unknown;
4431 	}
4432 
4433 	/* Anything else is out of spec, warn and ignore */
4434 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4435 	return connector_status_disconnected;
4436 }
4437 
4438 static enum drm_connector_status
4439 edp_detect(struct intel_dp *intel_dp)
4440 {
4441 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4442 	enum drm_connector_status status;
4443 
4444 	status = intel_panel_detect(dev);
4445 	if (status == connector_status_unknown)
4446 		status = connector_status_connected;
4447 
4448 	return status;
4449 }
4450 
4451 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4452 				       struct intel_digital_port *port)
4453 {
4454 	u32 bit;
4455 
4456 	switch (port->port) {
4457 	case PORT_A:
4458 		return true;
4459 	case PORT_B:
4460 		bit = SDE_PORTB_HOTPLUG;
4461 		break;
4462 	case PORT_C:
4463 		bit = SDE_PORTC_HOTPLUG;
4464 		break;
4465 	case PORT_D:
4466 		bit = SDE_PORTD_HOTPLUG;
4467 		break;
4468 	default:
4469 		MISSING_CASE(port->port);
4470 		return false;
4471 	}
4472 
4473 	return I915_READ(SDEISR) & bit;
4474 }
4475 
4476 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4477 				       struct intel_digital_port *port)
4478 {
4479 	u32 bit;
4480 
4481 	switch (port->port) {
4482 	case PORT_A:
4483 		return true;
4484 	case PORT_B:
4485 		bit = SDE_PORTB_HOTPLUG_CPT;
4486 		break;
4487 	case PORT_C:
4488 		bit = SDE_PORTC_HOTPLUG_CPT;
4489 		break;
4490 	case PORT_D:
4491 		bit = SDE_PORTD_HOTPLUG_CPT;
4492 		break;
4493 	case PORT_E:
4494 		bit = SDE_PORTE_HOTPLUG_SPT;
4495 		break;
4496 	default:
4497 		MISSING_CASE(port->port);
4498 		return false;
4499 	}
4500 
4501 	return I915_READ(SDEISR) & bit;
4502 }
4503 
4504 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4505 				       struct intel_digital_port *port)
4506 {
4507 	u32 bit;
4508 
4509 	switch (port->port) {
4510 	case PORT_B:
4511 		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4512 		break;
4513 	case PORT_C:
4514 		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4515 		break;
4516 	case PORT_D:
4517 		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4518 		break;
4519 	default:
4520 		MISSING_CASE(port->port);
4521 		return false;
4522 	}
4523 
4524 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4525 }
4526 
4527 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4528 					struct intel_digital_port *port)
4529 {
4530 	u32 bit;
4531 
4532 	switch (port->port) {
4533 	case PORT_B:
4534 		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4535 		break;
4536 	case PORT_C:
4537 		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4538 		break;
4539 	case PORT_D:
4540 		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4541 		break;
4542 	default:
4543 		MISSING_CASE(port->port);
4544 		return false;
4545 	}
4546 
4547 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4548 }
4549 
4550 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4551 				       struct intel_digital_port *intel_dig_port)
4552 {
4553 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4554 	enum port port;
4555 	u32 bit;
4556 
4557 	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4558 	switch (port) {
4559 	case PORT_A:
4560 		bit = BXT_DE_PORT_HP_DDIA;
4561 		break;
4562 	case PORT_B:
4563 		bit = BXT_DE_PORT_HP_DDIB;
4564 		break;
4565 	case PORT_C:
4566 		bit = BXT_DE_PORT_HP_DDIC;
4567 		break;
4568 	default:
4569 		MISSING_CASE(port);
4570 		return false;
4571 	}
4572 
4573 	return I915_READ(GEN8_DE_PORT_ISR) & bit;
4574 }
4575 
4576 /*
4577  * intel_digital_port_connected - is the specified port connected?
4578  * @dev_priv: i915 private structure
4579  * @port: the port to test
4580  *
4581  * Return %true if @port is connected, %false otherwise.
4582  */
4583 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4584 					 struct intel_digital_port *port)
4585 {
4586 	if (HAS_PCH_IBX(dev_priv))
4587 		return ibx_digital_port_connected(dev_priv, port);
4588 	else if (HAS_PCH_SPLIT(dev_priv))
4589 		return cpt_digital_port_connected(dev_priv, port);
4590 	else if (IS_BROXTON(dev_priv))
4591 		return bxt_digital_port_connected(dev_priv, port);
4592 	else if (IS_GM45(dev_priv))
4593 		return gm45_digital_port_connected(dev_priv, port);
4594 	else
4595 		return g4x_digital_port_connected(dev_priv, port);
4596 }
4597 
4598 static struct edid *
4599 intel_dp_get_edid(struct intel_dp *intel_dp)
4600 {
4601 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4602 
4603 	/* use cached edid if we have one */
4604 	if (intel_connector->edid) {
4605 		/* invalid edid */
4606 		if (IS_ERR(intel_connector->edid))
4607 			return NULL;
4608 
4609 		return drm_edid_duplicate(intel_connector->edid);
4610 	} else
4611 		return drm_get_edid(&intel_connector->base,
4612 				    &intel_dp->aux.ddc);
4613 }
4614 
4615 static void
4616 intel_dp_set_edid(struct intel_dp *intel_dp)
4617 {
4618 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4619 	struct edid *edid;
4620 
4621 	edid = intel_dp_get_edid(intel_dp);
4622 	intel_connector->detect_edid = edid;
4623 
4624 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4625 		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4626 	else
4627 		intel_dp->has_audio = drm_detect_monitor_audio(edid);
4628 }
4629 
4630 static void
4631 intel_dp_unset_edid(struct intel_dp *intel_dp)
4632 {
4633 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4634 
4635 	kfree(intel_connector->detect_edid);
4636 	intel_connector->detect_edid = NULL;
4637 
4638 	intel_dp->has_audio = false;
4639 }
4640 
4641 static enum drm_connector_status
4642 intel_dp_detect(struct drm_connector *connector, bool force)
4643 {
4644 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4645 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4646 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4647 	struct drm_device *dev = connector->dev;
4648 	enum drm_connector_status status;
4649 	enum intel_display_power_domain power_domain;
4650 	bool ret;
4651 	u8 sink_irq_vector;
4652 
4653 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4654 		      connector->base.id, connector->name);
4655 	intel_dp_unset_edid(intel_dp);
4656 
4657 	if (intel_dp->is_mst) {
4658 		/* MST devices are disconnected from a monitor POV */
4659 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4660 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4661 		return connector_status_disconnected;
4662 	}
4663 
4664 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
4665 	intel_display_power_get(to_i915(dev), power_domain);
4666 
4667 	/* Can't disconnect eDP, but you can close the lid... */
4668 	if (is_edp(intel_dp))
4669 		status = edp_detect(intel_dp);
4670 	else if (intel_digital_port_connected(to_i915(dev),
4671 					      dp_to_dig_port(intel_dp)))
4672 		status = intel_dp_detect_dpcd(intel_dp);
4673 	else
4674 		status = connector_status_disconnected;
4675 
4676 	if (status != connector_status_connected) {
4677 		intel_dp->compliance_test_active = 0;
4678 		intel_dp->compliance_test_type = 0;
4679 		intel_dp->compliance_test_data = 0;
4680 
4681 		goto out;
4682 	}
4683 
4684 	intel_dp_probe_oui(intel_dp);
4685 
4686 	ret = intel_dp_probe_mst(intel_dp);
4687 	if (ret) {
4688 		/* If we are in MST mode then this connector won't appear
4689 		 * connected or have anything with EDID on it. */
4690 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4691 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4692 		status = connector_status_disconnected;
4693 		goto out;
4694 	}
4695 
4696 	/*
4697 	 * Clear the NACK and defer counts so that their exact values can
4698 	 * be captured while reading the EDID, as required by Compliance
4699 	 * tests 4.2.2.4 and 4.2.2.5.
4700 	 */
4701 	intel_dp->aux.i2c_nack_count = 0;
4702 	intel_dp->aux.i2c_defer_count = 0;
4703 
4704 	intel_dp_set_edid(intel_dp);
4705 
4706 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4707 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4708 	status = connector_status_connected;
4709 
4710 	/* Try to read the source of the interrupt */
4711 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4712 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4713 		/* Clear interrupt source */
4714 		drm_dp_dpcd_writeb(&intel_dp->aux,
4715 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4716 				   sink_irq_vector);
4717 
4718 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4719 			intel_dp_handle_test_request(intel_dp);
4720 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4721 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4722 	}
4723 
4724 out:
4725 	intel_display_power_put(to_i915(dev), power_domain);
4726 	return status;
4727 }
4728 
4729 static void
4730 intel_dp_force(struct drm_connector *connector)
4731 {
4732 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4733 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4734 	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4735 	enum intel_display_power_domain power_domain;
4736 
4737 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4738 		      connector->base.id, connector->name);
4739 	intel_dp_unset_edid(intel_dp);
4740 
4741 	if (connector->status != connector_status_connected)
4742 		return;
4743 
4744 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
4745 	intel_display_power_get(dev_priv, power_domain);
4746 
4747 	intel_dp_set_edid(intel_dp);
4748 
4749 	intel_display_power_put(dev_priv, power_domain);
4750 
4751 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4752 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4753 }
4754 
4755 static int intel_dp_get_modes(struct drm_connector *connector)
4756 {
4757 	struct intel_connector *intel_connector = to_intel_connector(connector);
4758 	struct edid *edid;
4759 
4760 	edid = intel_connector->detect_edid;
4761 	if (edid) {
4762 		int ret = intel_connector_update_modes(connector, edid);
4763 		if (ret)
4764 			return ret;
4765 	}
4766 
4767 	/* if eDP has no EDID, fall back to fixed mode */
4768 	if (is_edp(intel_attached_dp(connector)) &&
4769 	    intel_connector->panel.fixed_mode) {
4770 		struct drm_display_mode *mode;
4771 
4772 		mode = drm_mode_duplicate(connector->dev,
4773 					  intel_connector->panel.fixed_mode);
4774 		if (mode) {
4775 			drm_mode_probed_add(connector, mode);
4776 			return 1;
4777 		}
4778 	}
4779 
4780 	return 0;
4781 }
4782 
4783 static bool
4784 intel_dp_detect_audio(struct drm_connector *connector)
4785 {
4786 	bool has_audio = false;
4787 	struct edid *edid;
4788 
4789 	edid = to_intel_connector(connector)->detect_edid;
4790 	if (edid)
4791 		has_audio = drm_detect_monitor_audio(edid);
4792 
4793 	return has_audio;
4794 }
4795 
4796 static int
4797 intel_dp_set_property(struct drm_connector *connector,
4798 		      struct drm_property *property,
4799 		      uint64_t val)
4800 {
4801 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4802 	struct intel_connector *intel_connector = to_intel_connector(connector);
4803 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4804 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4805 	int ret;
4806 
4807 	ret = drm_object_property_set_value(&connector->base, property, val);
4808 	if (ret)
4809 		return ret;
4810 
4811 	if (property == dev_priv->force_audio_property) {
4812 		int i = val;
4813 		bool has_audio;
4814 
4815 		if (i == intel_dp->force_audio)
4816 			return 0;
4817 
4818 		intel_dp->force_audio = i;
4819 
4820 		if (i == HDMI_AUDIO_AUTO)
4821 			has_audio = intel_dp_detect_audio(connector);
4822 		else
4823 			has_audio = (i == HDMI_AUDIO_ON);
4824 
4825 		if (has_audio == intel_dp->has_audio)
4826 			return 0;
4827 
4828 		intel_dp->has_audio = has_audio;
4829 		goto done;
4830 	}
4831 
4832 	if (property == dev_priv->broadcast_rgb_property) {
4833 		bool old_auto = intel_dp->color_range_auto;
4834 		bool old_range = intel_dp->limited_color_range;
4835 
4836 		switch (val) {
4837 		case INTEL_BROADCAST_RGB_AUTO:
4838 			intel_dp->color_range_auto = true;
4839 			break;
4840 		case INTEL_BROADCAST_RGB_FULL:
4841 			intel_dp->color_range_auto = false;
4842 			intel_dp->limited_color_range = false;
4843 			break;
4844 		case INTEL_BROADCAST_RGB_LIMITED:
4845 			intel_dp->color_range_auto = false;
4846 			intel_dp->limited_color_range = true;
4847 			break;
4848 		default:
4849 			return -EINVAL;
4850 		}
4851 
4852 		if (old_auto == intel_dp->color_range_auto &&
4853 		    old_range == intel_dp->limited_color_range)
4854 			return 0;
4855 
4856 		goto done;
4857 	}
4858 
4859 	if (is_edp(intel_dp) &&
4860 	    property == connector->dev->mode_config.scaling_mode_property) {
4861 		if (val == DRM_MODE_SCALE_NONE) {
4862 			DRM_DEBUG_KMS("no scaling not supported\n");
4863 			return -EINVAL;
4864 		}
4865 
4866 		if (intel_connector->panel.fitting_mode == val) {
4867 			/* the eDP scaling property is not changed */
4868 			return 0;
4869 		}
4870 		intel_connector->panel.fitting_mode = val;
4871 
4872 		goto done;
4873 	}
4874 
4875 	return -EINVAL;
4876 
4877 done:
4878 	if (intel_encoder->base.crtc)
4879 		intel_crtc_restore_mode(intel_encoder->base.crtc);
4880 
4881 	return 0;
4882 }
4883 
4884 static void
4885 intel_dp_connector_destroy(struct drm_connector *connector)
4886 {
4887 	struct intel_connector *intel_connector = to_intel_connector(connector);
4888 
4889 	kfree(intel_connector->detect_edid);
4890 
4891 	if (!IS_ERR_OR_NULL(intel_connector->edid))
4892 		kfree(intel_connector->edid);
4893 
4894 	/* Can't call is_edp() since the encoder may have been destroyed
4895 	 * already. */
4896 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4897 		intel_panel_fini(&intel_connector->panel);
4898 
4899 	drm_connector_cleanup(connector);
4900 	kfree(connector);
4901 }
4902 
4903 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4904 {
4905 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4906 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4907 
4908 	intel_dp_mst_encoder_cleanup(intel_dig_port);
4909 	if (is_edp(intel_dp)) {
4910 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4911 		/*
4912 		 * vdd might still be enabled due to the delayed vdd off.
4913 		 * Make sure vdd is actually turned off here.
4914 		 */
4915 		pps_lock(intel_dp);
4916 		edp_panel_vdd_off_sync(intel_dp);
4917 		pps_unlock(intel_dp);
4918 
4919 #if 0
4920 		if (intel_dp->edp_notifier.notifier_call) {
4921 			unregister_reboot_notifier(&intel_dp->edp_notifier);
4922 			intel_dp->edp_notifier.notifier_call = NULL;
4923 		}
4924 #endif
4925 	}
4926 	drm_encoder_cleanup(encoder);
4927 	kfree(intel_dig_port);
4928 }
4929 
4930 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4931 {
4932 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4933 
4934 	if (!is_edp(intel_dp))
4935 		return;
4936 
4937 	/*
4938 	 * vdd might still be enabled due to the delayed vdd off.
4939 	 * Make sure vdd is actually turned off here.
4940 	 */
4941 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4942 	pps_lock(intel_dp);
4943 	edp_panel_vdd_off_sync(intel_dp);
4944 	pps_unlock(intel_dp);
4945 }
4946 
4947 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4948 {
4949 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4950 	struct drm_device *dev = intel_dig_port->base.base.dev;
4951 	struct drm_i915_private *dev_priv = dev->dev_private;
4952 	enum intel_display_power_domain power_domain;
4953 
4954 	lockdep_assert_held(&dev_priv->pps_mutex);
4955 
4956 	if (!edp_have_panel_vdd(intel_dp))
4957 		return;
4958 
4959 	/*
4960 	 * The VDD bit needs a power domain reference, so if the bit is
4961 	 * already enabled when we boot or resume, grab this reference and
4962 	 * schedule a vdd off, so we don't hold on to the reference
4963 	 * indefinitely.
4964 	 */
4965 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4966 	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
4967 	intel_display_power_get(dev_priv, power_domain);
4968 
4969 	edp_panel_vdd_schedule_off(intel_dp);
4970 }
4971 
4972 void intel_dp_encoder_reset(struct drm_encoder *encoder)
4973 {
4974 	struct intel_dp *intel_dp;
4975 
4976 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4977 		return;
4978 
4979 	intel_dp = enc_to_intel_dp(encoder);
4980 
4981 	pps_lock(intel_dp);
4982 
4983 	/*
4984 	 * Read out the current power sequencer assignment,
4985 	 * in case the BIOS did something with it.
4986 	 */
4987 	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
4988 		vlv_initial_power_sequencer_setup(intel_dp);
4989 
4990 	intel_edp_panel_vdd_sanitize(intel_dp);
4991 
4992 	pps_unlock(intel_dp);
4993 }
4994 
4995 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4996 	.dpms = drm_atomic_helper_connector_dpms,
4997 	.detect = intel_dp_detect,
4998 	.force = intel_dp_force,
4999 	.fill_modes = drm_helper_probe_single_connector_modes,
5000 	.set_property = intel_dp_set_property,
5001 	.atomic_get_property = intel_connector_atomic_get_property,
5002 	.destroy = intel_dp_connector_destroy,
5003 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5004 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5005 };
5006 
5007 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5008 	.get_modes = intel_dp_get_modes,
5009 	.mode_valid = intel_dp_mode_valid,
5010 	.best_encoder = intel_best_encoder,
5011 };
5012 
5013 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5014 	.reset = intel_dp_encoder_reset,
5015 	.destroy = intel_dp_encoder_destroy,
5016 };
5017 
5018 bool
5019 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5020 {
5021 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5022 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5023 	struct drm_device *dev = intel_dig_port->base.base.dev;
5024 	struct drm_i915_private *dev_priv = dev->dev_private;
5025 	enum intel_display_power_domain power_domain;
5026 	bool ret = true;
5027 
5028 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5029 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5030 
5031 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5032 		/*
5033 		 * vdd off can generate a long pulse on eDP which
5034 		 * would require vdd on to handle it, and thus we
5035 		 * would end up in an endless cycle of
5036 		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5037 		 */
5038 		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5039 			      port_name(intel_dig_port->port));
5040 		return false;
5041 	}
5042 
5043 	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5044 		      port_name(intel_dig_port->port),
5045 		      long_hpd ? "long" : "short");
5046 
5047 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
5048 	intel_display_power_get(dev_priv, power_domain);
5049 
5050 	if (long_hpd) {
5051 		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5052 			goto mst_fail;
5053 
5054 		if (!intel_dp_get_dpcd(intel_dp)) {
5055 			goto mst_fail;
5056 		}
5057 
5058 		intel_dp_probe_oui(intel_dp);
5059 
5060 		if (!intel_dp_probe_mst(intel_dp)) {
5061 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5062 			intel_dp_check_link_status(intel_dp);
5063 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
5064 			goto mst_fail;
5065 		}
5066 	} else {
5067 		if (intel_dp->is_mst) {
5068 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5069 				goto mst_fail;
5070 		}
5071 
5072 		if (!intel_dp->is_mst) {
5073 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5074 			intel_dp_check_link_status(intel_dp);
5075 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
5076 		}
5077 	}
5078 
5079 	ret = false;
5080 
5081 	goto put_power;
5082 mst_fail:
5083 	/* if we were in MST mode and the device is no longer there, get out of MST mode */
5084 	if (intel_dp->is_mst) {
5085 		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5086 		intel_dp->is_mst = false;
5087 		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5088 	}
5089 put_power:
5090 	intel_display_power_put(dev_priv, power_domain);
5091 
5092 	return ret;
5093 }
5094 
5095 /* check the VBT to see whether the eDP is on another port */
5096 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5097 {
5098 	struct drm_i915_private *dev_priv = dev->dev_private;
5099 	union child_device_config *p_child;
5100 	int i;
5101 	static const short port_mapping[] = {
5102 		[PORT_B] = DVO_PORT_DPB,
5103 		[PORT_C] = DVO_PORT_DPC,
5104 		[PORT_D] = DVO_PORT_DPD,
5105 		[PORT_E] = DVO_PORT_DPE,
5106 	};
5107 
5108 	/*
5109 	 * eDP is not supported on g4x, so bail out early just
5110 	 * for a bit of extra safety in case the VBT is bonkers.
5111 	 */
5112 	if (INTEL_INFO(dev)->gen < 5)
5113 		return false;
5114 
5115 	if (port == PORT_A)
5116 		return true;
5117 
5118 	if (!dev_priv->vbt.child_dev_num)
5119 		return false;
5120 
5121 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5122 		p_child = dev_priv->vbt.child_dev + i;
5123 
5124 		if (p_child->common.dvo_port == port_mapping[port] &&
5125 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5126 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5127 			return true;
5128 	}
5129 	return false;
5130 }
5131 
5132 void
5133 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5134 {
5135 	struct intel_connector *intel_connector = to_intel_connector(connector);
5136 
5137 	intel_attach_force_audio_property(connector);
5138 	intel_attach_broadcast_rgb_property(connector);
5139 	intel_dp->color_range_auto = true;
5140 
5141 	if (is_edp(intel_dp)) {
5142 		drm_mode_create_scaling_mode_property(connector->dev);
5143 		drm_object_attach_property(
5144 			&connector->base,
5145 			connector->dev->mode_config.scaling_mode_property,
5146 			DRM_MODE_SCALE_ASPECT);
5147 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5148 	}
5149 }
5150 
5151 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5152 {
5153 	intel_dp->panel_power_off_time = ktime_get_boottime();
5154 	intel_dp->last_power_on = jiffies;
5155 	intel_dp->last_backlight_off = jiffies;
5156 }
5157 
5158 static void
5159 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5160 				    struct intel_dp *intel_dp)
5161 {
5162 	struct drm_i915_private *dev_priv = dev->dev_private;
5163 	struct edp_power_seq cur, vbt, spec,
5164 		*final = &intel_dp->pps_delays;
5165 	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5166 	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5167 
5168 	lockdep_assert_held(&dev_priv->pps_mutex);
5169 
5170 	/* already initialized? */
5171 	if (final->t11_t12 != 0)
5172 		return;
5173 
5174 	if (IS_BROXTON(dev)) {
5175 		/*
5176 		 * TODO: BXT has 2 sets of PPS registers.
		 * The correct register set for Broxton needs to be
		 * identified using the VBT; hardcode set 0 for now.
5179 		 */
5180 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5181 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5182 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5183 	} else if (HAS_PCH_SPLIT(dev)) {
5184 		pp_ctrl_reg = PCH_PP_CONTROL;
5185 		pp_on_reg = PCH_PP_ON_DELAYS;
5186 		pp_off_reg = PCH_PP_OFF_DELAYS;
5187 		pp_div_reg = PCH_PP_DIVISOR;
5188 	} else {
5189 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5190 
5191 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5192 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5193 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5194 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5195 	}
5196 
5197 	/* Workaround: Need to write PP_CONTROL with the unlock key as
5198 	 * the very first thing. */
5199 	pp_ctl = ironlake_get_pp_control(intel_dp);
5200 
5201 	pp_on = I915_READ(pp_on_reg);
5202 	pp_off = I915_READ(pp_off_reg);
5203 	if (!IS_BROXTON(dev)) {
5204 		I915_WRITE(pp_ctrl_reg, pp_ctl);
5205 		pp_div = I915_READ(pp_div_reg);
5206 	}
5207 
5208 	/* Pull timing values out of registers */
5209 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5210 		PANEL_POWER_UP_DELAY_SHIFT;
5211 
5212 	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5213 		PANEL_LIGHT_ON_DELAY_SHIFT;
5214 
5215 	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5216 		PANEL_LIGHT_OFF_DELAY_SHIFT;
5217 
5218 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5219 		PANEL_POWER_DOWN_DELAY_SHIFT;
5220 
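	/*
	 * On BXT the power cycle delay lives in the PP_CONTROL register and
	 * is encoded in 100 ms units, offset by one: a raw value of N means
	 * (N - 1) * 100 ms. Convert it to our internal 100 us units.
	 */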
5221 	if (IS_BROXTON(dev)) {
5222 		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5223 			BXT_POWER_CYCLE_DELAY_SHIFT;
5224 		if (tmp > 0)
5225 			cur.t11_t12 = (tmp - 1) * 1000;
5226 		else
5227 			cur.t11_t12 = 0;
5228 	} else {
5229 		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5230 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5231 	}
5232 
5233 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5234 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5235 
5236 	vbt = dev_priv->vbt.edp_pps;
5237 
	/* Upper limits from the eDP 1.3 spec. Note that we use the clunky
	 * units of our hw here, which are all in 100 usec. */
5240 	spec.t1_t3 = 210 * 10;
5241 	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5242 	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5243 	spec.t10 = 500 * 10;
	/* This one is special: it is actually in units of 100 ms, but
	 * zero-based in the hw (so we need to add 100 ms). The sw VBT
	 * table multiplies it by 1000 to put it in units of 100 usec,
	 * too. */
5248 	spec.t11_t12 = (510 + 100) * 10;
5249 
5250 	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5251 		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5252 
5253 	/* Use the max of the register settings and vbt. If both are
5254 	 * unset, fall back to the spec limits. */
5255 #define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
5256 				       spec.field : \
5257 				       max(cur.field, vbt.field))
5258 	assign_final(t1_t3);
5259 	assign_final(t8);
5260 	assign_final(t9);
5261 	assign_final(t10);
5262 	assign_final(t11_t12);
5263 #undef assign_final
5264 
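	/*
	 * The merged delays above are in 100 us units; dividing by 10 (and
	 * rounding up so we never wait too little) yields the millisecond
	 * values used by the wait helpers.
	 */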
5265 #define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
5266 	intel_dp->panel_power_up_delay = get_delay(t1_t3);
5267 	intel_dp->backlight_on_delay = get_delay(t8);
5268 	intel_dp->backlight_off_delay = get_delay(t9);
5269 	intel_dp->panel_power_down_delay = get_delay(t10);
5270 	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5271 #undef get_delay
5272 
5273 	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5274 		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5275 		      intel_dp->panel_power_cycle_delay);
5276 
5277 	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5278 		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5279 }
5280 
5281 static void
5282 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5283 					      struct intel_dp *intel_dp)
5284 {
5285 	struct drm_i915_private *dev_priv = dev->dev_private;
5286 	u32 pp_on, pp_off, pp_div, port_sel = 0;
5287 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5288 	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
5289 	enum port port = dp_to_dig_port(intel_dp)->port;
5290 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
5291 
5292 	lockdep_assert_held(&dev_priv->pps_mutex);
5293 
5294 	if (IS_BROXTON(dev)) {
5295 		/*
5296 		 * TODO: BXT has 2 sets of PPS registers.
		 * The correct register set for Broxton needs to be
		 * identified using the VBT; hardcode set 0 for now.
5299 		 */
5300 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5301 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5302 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5303 
5304 	} else if (HAS_PCH_SPLIT(dev)) {
5305 		pp_on_reg = PCH_PP_ON_DELAYS;
5306 		pp_off_reg = PCH_PP_OFF_DELAYS;
5307 		pp_div_reg = PCH_PP_DIVISOR;
5308 	} else {
5309 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5310 
5311 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5312 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5313 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5314 	}
5315 
5316 	/*
5317 	 * And finally store the new values in the power sequencer. The
5318 	 * backlight delays are set to 1 because we do manual waits on them. For
5319 	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5320 	 * we'll end up waiting for the backlight off delay twice: once when we
5321 	 * do the manual sleep, and once when we disable the panel and wait for
5322 	 * the PP_STATUS bit to become zero.
5323 	 */
5324 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5325 		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5326 	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5327 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply matching the Bspec
	 * formula. The power cycle delay field is programmed in 100 ms
	 * units, so convert t11_t12 from our internal 100 us units. */
5330 	if (IS_BROXTON(dev)) {
5331 		pp_div = I915_READ(pp_ctrl_reg);
5332 		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5333 		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5334 				<< BXT_POWER_CYCLE_DELAY_SHIFT);
5335 	} else {
5336 		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5337 		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5338 				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
5339 	}
5340 
5341 	/* Haswell doesn't have any port selection bits for the panel
5342 	 * power sequencer any more. */
5343 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5344 		port_sel = PANEL_PORT_SELECT_VLV(port);
5345 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5346 		if (port == PORT_A)
5347 			port_sel = PANEL_PORT_SELECT_DPA;
5348 		else
5349 			port_sel = PANEL_PORT_SELECT_DPD;
5350 	}
5351 
5352 	pp_on |= port_sel;
5353 
5354 	I915_WRITE(pp_on_reg, pp_on);
5355 	I915_WRITE(pp_off_reg, pp_off);
5356 	if (IS_BROXTON(dev))
5357 		I915_WRITE(pp_ctrl_reg, pp_div);
5358 	else
5359 		I915_WRITE(pp_div_reg, pp_div);
5360 
5361 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5362 		      I915_READ(pp_on_reg),
5363 		      I915_READ(pp_off_reg),
5364 		      IS_BROXTON(dev) ?
5365 		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5366 		      I915_READ(pp_div_reg));
5367 }
5368 
5369 /**
5370  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5371  * @dev: DRM device
5372  * @refresh_rate: RR to be programmed
5373  *
5374  * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between the high and low RR
 * supported by the panel, or to any other RR based on media playback (in
 * this case, the RR value needs to be passed from user space).
5378  *
5379  * The caller of this function needs to take a lock on dev_priv->drrs.
5380  */
5381 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5382 {
5383 	struct drm_i915_private *dev_priv = dev->dev_private;
5384 	struct intel_encoder *encoder;
5385 	struct intel_digital_port *dig_port = NULL;
5386 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5387 	struct intel_crtc_state *config = NULL;
5388 	struct intel_crtc *intel_crtc = NULL;
5389 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5390 
5391 	if (refresh_rate <= 0) {
5392 		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5393 		return;
5394 	}
5395 
5396 	if (intel_dp == NULL) {
5397 		DRM_DEBUG_KMS("DRRS not supported.\n");
5398 		return;
5399 	}
5400 
5401 	/*
5402 	 * FIXME: This needs proper synchronization with psr state for some
5403 	 * platforms that cannot have PSR and DRRS enabled at the same time.
5404 	 */
5405 
5406 	dig_port = dp_to_dig_port(intel_dp);
5407 	encoder = &dig_port->base;
5408 	intel_crtc = to_intel_crtc(encoder->base.crtc);
5409 
5410 	if (!intel_crtc) {
5411 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5412 		return;
5413 	}
5414 
5415 	config = intel_crtc->config;
5416 
5417 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5418 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5419 		return;
5420 	}
5421 
5422 	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5423 			refresh_rate)
5424 		index = DRRS_LOW_RR;
5425 
5426 	if (index == dev_priv->drrs.refresh_rate_type) {
5427 		DRM_DEBUG_KMS(
5428 			"DRRS requested for previously set RR...ignoring\n");
5429 		return;
5430 	}
5431 
5432 	if (!intel_crtc->active) {
5433 		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5434 		return;
5435 	}
5436 
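	/*
	 * Gen 8+ (other than CHV) switch the refresh rate by selecting
	 * between the M1/N1 and M2/N2 link parameters; older gens and
	 * VLV/CHV toggle a refresh-rate switch bit in PIPECONF instead.
	 */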
5437 	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5438 		switch (index) {
5439 		case DRRS_HIGH_RR:
5440 			intel_dp_set_m_n(intel_crtc, M1_N1);
5441 			break;
5442 		case DRRS_LOW_RR:
5443 			intel_dp_set_m_n(intel_crtc, M2_N2);
5444 			break;
5445 		case DRRS_MAX_RR:
5446 		default:
5447 			DRM_ERROR("Unsupported refreshrate type\n");
5448 		}
5449 	} else if (INTEL_INFO(dev)->gen > 6) {
5450 		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5451 		u32 val;
5452 
5453 		val = I915_READ(reg);
5454 		if (index > DRRS_HIGH_RR) {
5455 			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5456 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5457 			else
5458 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5459 		} else {
5460 			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5461 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5462 			else
5463 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5464 		}
5465 		I915_WRITE(reg, val);
5466 	}
5467 
5468 	dev_priv->drrs.refresh_rate_type = index;
5469 
5470 	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5471 }
5472 
5473 /**
5474  * intel_edp_drrs_enable - init drrs struct if supported
5475  * @intel_dp: DP struct
5476  *
5477  * Initializes frontbuffer_bits and drrs.dp
5478  */
5479 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5480 {
5481 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5482 	struct drm_i915_private *dev_priv = dev->dev_private;
5483 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5484 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5485 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5486 
5487 	if (!intel_crtc->config->has_drrs) {
5488 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5489 		return;
5490 	}
5491 
5492 	mutex_lock(&dev_priv->drrs.mutex);
5493 	if (WARN_ON(dev_priv->drrs.dp)) {
5494 		DRM_ERROR("DRRS already enabled\n");
5495 		goto unlock;
5496 	}
5497 
5498 	dev_priv->drrs.busy_frontbuffer_bits = 0;
5499 
5500 	dev_priv->drrs.dp = intel_dp;
5501 
5502 unlock:
5503 	mutex_unlock(&dev_priv->drrs.mutex);
5504 }
5505 
5506 /**
5507  * intel_edp_drrs_disable - Disable DRRS
5508  * @intel_dp: DP struct
5509  *
5510  */
5511 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5512 {
5513 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5514 	struct drm_i915_private *dev_priv = dev->dev_private;
5515 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5516 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5517 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5518 
5519 	if (!intel_crtc->config->has_drrs)
5520 		return;
5521 
5522 	mutex_lock(&dev_priv->drrs.mutex);
5523 	if (!dev_priv->drrs.dp) {
5524 		mutex_unlock(&dev_priv->drrs.mutex);
5525 		return;
5526 	}
5527 
5528 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5529 		intel_dp_set_drrs_state(dev_priv->dev,
5530 			intel_dp->attached_connector->panel.
5531 			fixed_mode->vrefresh);
5532 
5533 	dev_priv->drrs.dp = NULL;
5534 	mutex_unlock(&dev_priv->drrs.mutex);
5535 
5536 	cancel_delayed_work_sync(&dev_priv->drrs.work);
5537 }
5538 
5539 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5540 {
5541 	struct drm_i915_private *dev_priv =
5542 		container_of(work, typeof(*dev_priv), drrs.work.work);
5543 	struct intel_dp *intel_dp;
5544 
5545 	mutex_lock(&dev_priv->drrs.mutex);
5546 
5547 	intel_dp = dev_priv->drrs.dp;
5548 
5549 	if (!intel_dp)
5550 		goto unlock;
5551 
5552 	/*
	 * The delayed work can race with an invalidate, hence we need to
5554 	 * recheck.
5555 	 */
5556 
5557 	if (dev_priv->drrs.busy_frontbuffer_bits)
5558 		goto unlock;
5559 
5560 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5561 		intel_dp_set_drrs_state(dev_priv->dev,
5562 			intel_dp->attached_connector->panel.
5563 			downclock_mode->vrefresh);
5564 
5565 unlock:
5566 	mutex_unlock(&dev_priv->drrs.mutex);
5567 }
5568 
5569 /**
5570  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5571  * @dev: DRM device
5572  * @frontbuffer_bits: frontbuffer plane tracking bits
5573  *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
5576  *
5577  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5578  */
5579 void intel_edp_drrs_invalidate(struct drm_device *dev,
5580 		unsigned frontbuffer_bits)
5581 {
5582 	struct drm_i915_private *dev_priv = dev->dev_private;
5583 	struct drm_crtc *crtc;
5584 	enum i915_pipe pipe;
5585 
5586 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5587 		return;
5588 
5589 	cancel_delayed_work(&dev_priv->drrs.work);
5590 
5591 	mutex_lock(&dev_priv->drrs.mutex);
5592 	if (!dev_priv->drrs.dp) {
5593 		mutex_unlock(&dev_priv->drrs.mutex);
5594 		return;
5595 	}
5596 
5597 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5598 	pipe = to_intel_crtc(crtc)->pipe;
5599 
5600 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5601 	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5602 
5603 	/* invalidate means busy screen hence upclock */
5604 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5605 		intel_dp_set_drrs_state(dev_priv->dev,
5606 				dev_priv->drrs.dp->attached_connector->panel.
5607 				fixed_mode->vrefresh);
5608 
5609 	mutex_unlock(&dev_priv->drrs.mutex);
5610 }
5611 
5612 /**
5613  * intel_edp_drrs_flush - Restart Idleness DRRS
5614  * @dev: DRM device
5615  * @frontbuffer_bits: frontbuffer plane tracking bits
5616  *
 * This function gets called every time rendering on the given planes has
 * completed, or a flip on a crtc has completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR), and idleness detection should be restarted if no
 * other planes are dirty.
5621  *
5622  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5623  */
5624 void intel_edp_drrs_flush(struct drm_device *dev,
5625 		unsigned frontbuffer_bits)
5626 {
5627 	struct drm_i915_private *dev_priv = dev->dev_private;
5628 	struct drm_crtc *crtc;
5629 	enum i915_pipe pipe;
5630 
5631 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5632 		return;
5633 
5634 	cancel_delayed_work(&dev_priv->drrs.work);
5635 
5636 	mutex_lock(&dev_priv->drrs.mutex);
5637 	if (!dev_priv->drrs.dp) {
5638 		mutex_unlock(&dev_priv->drrs.mutex);
5639 		return;
5640 	}
5641 
5642 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5643 	pipe = to_intel_crtc(crtc)->pipe;
5644 
5645 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5646 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5647 
5648 	/* flush means busy screen hence upclock */
5649 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5650 		intel_dp_set_drrs_state(dev_priv->dev,
5651 				dev_priv->drrs.dp->attached_connector->panel.
5652 				fixed_mode->vrefresh);
5653 
5654 	/*
	 * flush also means no more activity, hence schedule downclock if all
	 * other fbs are quiescent too
5657 	 */
5658 	if (!dev_priv->drrs.busy_frontbuffer_bits)
5659 		schedule_delayed_work(&dev_priv->drrs.work,
5660 				msecs_to_jiffies(1000));
5661 	mutex_unlock(&dev_priv->drrs.mutex);
5662 }
5663 
5664 /**
5665  * DOC: Display Refresh Rate Switching (DRRS)
5666  *
5667  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * to internal panels.
5671  *
5672  * Indication that the panel supports DRRS is given by the panel EDID, which
5673  * would list multiple refresh rates for one resolution.
5674  *
5675  * DRRS is of 2 types - static and seamless.
5676  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
5678  * Seamless DRRS involves changing RR without any visual effect to the user
5679  * and can be used during normal system usage. This is done by programming
5680  * certain registers.
5681  *
5682  * Support for static/seamless DRRS may be indicated in the VBT based on
5683  * inputs from the panel spec.
5684  *
5685  * DRRS saves power by switching to low RR based on usage scenarios.
5686  *
 * eDP DRRS:
 *        The implementation is based on the frontbuffer tracking mechanism.
5689  * When there is a disturbance on the screen triggered by user activity or a
5690  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5691  * When there is no movement on screen, after a timeout of 1 second, a switch
5692  * to low RR is made.
5693  *        For integration with frontbuffer tracking code,
5694  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5695  *
5696  * DRRS can be further extended to support other internal panels and also
5697  * the scenario of video playback wherein RR is set based on the rate
5698  * requested by userspace.
5699  */
5700 
5701 /**
5702  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5703  * @intel_connector: eDP connector
5704  * @fixed_mode: preferred mode of panel
5705  *
 * This function is called only once at driver load to initialize basic
 * DRRS state.
5708  *
5709  * Returns:
 * Downclock mode if the panel supports it, otherwise NULL.
 * DRRS support is determined by the presence of a downclock mode (apart
 * from the VBT setting).
5713  */
5714 static struct drm_display_mode *
5715 intel_dp_drrs_init(struct intel_connector *intel_connector,
5716 		struct drm_display_mode *fixed_mode)
5717 {
5718 	struct drm_connector *connector = &intel_connector->base;
5719 	struct drm_device *dev = connector->dev;
5720 	struct drm_i915_private *dev_priv = dev->dev_private;
5721 	struct drm_display_mode *downclock_mode = NULL;
5722 
5723 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
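	/* DragonFly: lockinit() stands in for the Linux mutex_init() here. */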
5724 	lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5725 
5726 	if (INTEL_INFO(dev)->gen <= 6) {
5727 		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5728 		return NULL;
5729 	}
5730 
5731 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5732 		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5733 		return NULL;
5734 	}
5735 
	downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
						    connector);
5738 
5739 	if (!downclock_mode) {
5740 		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5741 		return NULL;
5742 	}
5743 
5744 	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5745 
5746 	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5747 	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5748 	return downclock_mode;
5749 }
5750 
5751 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5752 				     struct intel_connector *intel_connector)
5753 {
5754 	struct drm_connector *connector = &intel_connector->base;
5755 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5756 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5757 	struct drm_device *dev = intel_encoder->base.dev;
5758 	struct drm_i915_private *dev_priv = dev->dev_private;
5759 	struct drm_display_mode *fixed_mode = NULL;
5760 	struct drm_display_mode *downclock_mode = NULL;
5761 	bool has_dpcd;
5762 	struct drm_display_mode *scan;
5763 	struct edid *edid;
5764 	enum i915_pipe pipe = INVALID_PIPE;
5765 
5766 	if (!is_edp(intel_dp))
5767 		return true;
5768 
5769 	pps_lock(intel_dp);
5770 	intel_edp_panel_vdd_sanitize(intel_dp);
5771 	pps_unlock(intel_dp);
5772 
	/* Cache DPCD and EDID for eDP. */
5774 	has_dpcd = intel_dp_get_dpcd(intel_dp);
5775 
5776 	if (has_dpcd) {
5777 		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5778 			dev_priv->no_aux_handshake =
5779 				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5780 				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5781 	} else {
5782 		/* if this fails, presume the device is a ghost */
5783 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
5784 		return false;
5785 	}
5786 
5787 	/* We now know it's not a ghost, init power sequence regs. */
5788 	pps_lock(intel_dp);
5789 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5790 	pps_unlock(intel_dp);
5791 
5792 	mutex_lock(&dev->mode_config.mutex);
5793 	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5794 	if (edid) {
5795 		if (drm_add_edid_modes(connector, edid)) {
5796 			drm_mode_connector_update_edid_property(connector,
5797 								edid);
5798 			drm_edid_to_eld(connector, edid);
5799 		} else {
5800 			kfree(edid);
5801 			edid = ERR_PTR(-EINVAL);
5802 		}
5803 	} else {
5804 		edid = ERR_PTR(-ENOENT);
5805 	}
5806 	intel_connector->edid = edid;
5807 
5808 	/* prefer fixed mode from EDID if available */
5809 	list_for_each_entry(scan, &connector->probed_modes, head) {
5810 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5811 			fixed_mode = drm_mode_duplicate(dev, scan);
5812 			downclock_mode = intel_dp_drrs_init(
5813 						intel_connector, fixed_mode);
5814 			break;
5815 		}
5816 	}
5817 
5818 	/* fallback to VBT if available for eDP */
5819 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5820 		fixed_mode = drm_mode_duplicate(dev,
5821 					dev_priv->vbt.lfp_lvds_vbt_mode);
5822 		if (fixed_mode)
5823 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5824 	}
5825 	mutex_unlock(&dev->mode_config.mutex);
5826 
5827 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5828 		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5829 #if 0
5830 		register_reboot_notifier(&intel_dp->edp_notifier);
5831 #endif
5832 
5833 		/*
5834 		 * Figure out the current pipe for the initial backlight setup.
5835 		 * If the current pipe isn't valid, try the PPS pipe, and if that
5836 		 * fails just assume pipe A.
5837 		 */
5838 		if (IS_CHERRYVIEW(dev))
5839 			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5840 		else
5841 			pipe = PORT_TO_PIPE(intel_dp->DP);
5842 
5843 		if (pipe != PIPE_A && pipe != PIPE_B)
5844 			pipe = intel_dp->pps_pipe;
5845 
5846 		if (pipe != PIPE_A && pipe != PIPE_B)
5847 			pipe = PIPE_A;
5848 
5849 		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5850 			      pipe_name(pipe));
5851 	}
5852 
5853 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5854 	intel_connector->panel.backlight.power = intel_edp_backlight_power;
5855 	intel_panel_setup_backlight(connector, pipe);
5856 
5857 	return true;
5858 }
5859 
5860 bool
5861 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5862 			struct intel_connector *intel_connector)
5863 {
5864 	struct drm_connector *connector = &intel_connector->base;
5865 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5866 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5867 	struct drm_device *dev = intel_encoder->base.dev;
5868 	struct drm_i915_private *dev_priv = dev->dev_private;
5869 	enum port port = intel_dig_port->port;
5870 	int type, ret;
5871 
5872 	if (WARN(intel_dig_port->max_lanes < 1,
5873 		 "Not enough lanes (%d) for DP on port %c\n",
5874 		 intel_dig_port->max_lanes, port_name(port)))
5875 		return false;
5876 
5877 	intel_dp->pps_pipe = INVALID_PIPE;
5878 
5879 	/* intel_dp vfuncs */
5880 	if (INTEL_INFO(dev)->gen >= 9)
5881 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5882 	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5883 		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5884 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5885 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5886 	else if (HAS_PCH_SPLIT(dev))
5887 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5888 	else
5889 		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5890 
5891 	if (INTEL_INFO(dev)->gen >= 9)
5892 		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5893 	else
5894 		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5895 
5896 	if (HAS_DDI(dev))
5897 		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5898 
5899 	/* Preserve the current hw state. */
5900 	intel_dp->DP = I915_READ(intel_dp->output_reg);
5901 	intel_dp->attached_connector = intel_connector;
5902 
5903 	if (intel_dp_is_edp(dev, port))
5904 		type = DRM_MODE_CONNECTOR_eDP;
5905 	else
5906 		type = DRM_MODE_CONNECTOR_DisplayPort;
5907 
5908 	/*
5909 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5910 	 * for DP the encoder type can be set by the caller to
5911 	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5912 	 */
5913 	if (type == DRM_MODE_CONNECTOR_eDP)
5914 		intel_encoder->type = INTEL_OUTPUT_EDP;
5915 
5916 	/* eDP only on port B and/or C on vlv/chv */
5917 	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5918 		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
5919 		return false;
5920 
5921 	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5922 			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5923 			port_name(port));
5924 
5925 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5926 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5927 
5928 	connector->interlace_allowed = true;
5929 	connector->doublescan_allowed = 0;
5930 
5931 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5932 			  edp_panel_vdd_work);
5933 
5934 	intel_connector_attach_encoder(intel_connector, intel_encoder);
5935 	drm_connector_register(connector);
5936 
5937 	if (HAS_DDI(dev))
5938 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5939 	else
5940 		intel_connector->get_hw_state = intel_connector_get_hw_state;
5941 	intel_connector->unregister = intel_dp_connector_unregister;
5942 
5943 	/* Set up the hotplug pin. */
5944 	switch (port) {
5945 	case PORT_A:
5946 		intel_encoder->hpd_pin = HPD_PORT_A;
5947 		break;
5948 	case PORT_B:
5949 		intel_encoder->hpd_pin = HPD_PORT_B;
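		/* BXT A0/A1 steppings route port B hotplug to the port A pin. */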
5950 		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
5951 			intel_encoder->hpd_pin = HPD_PORT_A;
5952 		break;
5953 	case PORT_C:
5954 		intel_encoder->hpd_pin = HPD_PORT_C;
5955 		break;
5956 	case PORT_D:
5957 		intel_encoder->hpd_pin = HPD_PORT_D;
5958 		break;
5959 	case PORT_E:
5960 		intel_encoder->hpd_pin = HPD_PORT_E;
5961 		break;
5962 	default:
5963 		BUG();
5964 	}
5965 
5966 	if (is_edp(intel_dp)) {
5967 		pps_lock(intel_dp);
5968 		intel_dp_init_panel_power_timestamps(intel_dp);
5969 		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5970 			vlv_initial_power_sequencer_setup(intel_dp);
5971 		else
5972 			intel_dp_init_panel_power_sequencer(dev, intel_dp);
5973 		pps_unlock(intel_dp);
5974 	}
5975 
5976 	ret = intel_dp_aux_init(intel_dp, intel_connector);
5977 	if (ret)
5978 		goto fail;
5979 
5980 	/* init MST on ports that can support it */
5981 	if (HAS_DP_MST(dev) &&
5982 	    (port == PORT_B || port == PORT_C || port == PORT_D))
5983 		intel_dp_mst_encoder_init(intel_dig_port,
5984 					  intel_connector->base.base.id);
5985 
5986 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5987 		intel_dp_aux_fini(intel_dp);
5988 		intel_dp_mst_encoder_cleanup(intel_dig_port);
5989 		goto fail;
5990 	}
5991 
5992 	intel_dp_add_properties(intel_dp, connector);
5993 
	/* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
	 * written with 0xd.  Failure to do so will result in spurious
	 * interrupts being generated on the port when a cable is not attached.
5997 	 */
5998 	if (IS_G4X(dev) && !IS_GM45(dev)) {
5999 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6000 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6001 	}
6002 
6003 	i915_debugfs_connector_add(connector);
6004 
6005 	return true;
6006 
6007 fail:
6008 	if (is_edp(intel_dp)) {
6009 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6010 		/*
		 * vdd might still be enabled due to the delayed vdd off.
6012 		 * Make sure vdd is actually turned off here.
6013 		 */
6014 		pps_lock(intel_dp);
6015 		edp_panel_vdd_off_sync(intel_dp);
6016 		pps_unlock(intel_dp);
6017 	}
6018 	drm_connector_unregister(connector);
6019 	drm_connector_cleanup(connector);
6020 
6021 	return false;
6022 }
6023 
6024 void
6025 intel_dp_init(struct drm_device *dev,
6026 	      i915_reg_t output_reg, enum port port)
6027 {
6028 	struct drm_i915_private *dev_priv = dev->dev_private;
6029 	struct intel_digital_port *intel_dig_port;
6030 	struct intel_encoder *intel_encoder;
6031 	struct drm_encoder *encoder;
6032 	struct intel_connector *intel_connector;
6033 
6034 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6035 	if (!intel_dig_port)
6036 		return;
6037 
6038 	intel_connector = intel_connector_alloc();
6039 	if (!intel_connector)
6040 		goto err_connector_alloc;
6041 
6042 	intel_encoder = &intel_dig_port->base;
6043 	encoder = &intel_encoder->base;
6044 
6045 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6046 			 DRM_MODE_ENCODER_TMDS, NULL);
6047 
6048 	intel_encoder->compute_config = intel_dp_compute_config;
6049 	intel_encoder->disable = intel_disable_dp;
6050 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
6051 	intel_encoder->get_config = intel_dp_get_config;
6052 	intel_encoder->suspend = intel_dp_encoder_suspend;
6053 	if (IS_CHERRYVIEW(dev)) {
6054 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6055 		intel_encoder->pre_enable = chv_pre_enable_dp;
6056 		intel_encoder->enable = vlv_enable_dp;
6057 		intel_encoder->post_disable = chv_post_disable_dp;
6058 		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6059 	} else if (IS_VALLEYVIEW(dev)) {
6060 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6061 		intel_encoder->pre_enable = vlv_pre_enable_dp;
6062 		intel_encoder->enable = vlv_enable_dp;
6063 		intel_encoder->post_disable = vlv_post_disable_dp;
6064 	} else {
6065 		intel_encoder->pre_enable = g4x_pre_enable_dp;
6066 		intel_encoder->enable = g4x_enable_dp;
6067 		if (INTEL_INFO(dev)->gen >= 5)
6068 			intel_encoder->post_disable = ilk_post_disable_dp;
6069 	}
6070 
6071 	intel_dig_port->port = port;
6072 	intel_dig_port->dp.output_reg = output_reg;
6073 	intel_dig_port->max_lanes = 4;
6074 
6075 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6076 	if (IS_CHERRYVIEW(dev)) {
6077 		if (port == PORT_D)
6078 			intel_encoder->crtc_mask = 1 << 2;
6079 		else
6080 			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6081 	} else {
6082 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6083 	}
6084 	intel_encoder->cloneable = 0;
6085 
6086 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6087 	dev_priv->hotplug.irq_port[port] = intel_dig_port;
6088 
6089 	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6090 		goto err_init_connector;
6091 
6092 	return;
6093 
6094 err_init_connector:
6095 	drm_encoder_cleanup(encoder);
6096 	kfree(intel_connector);
6097 err_connector_alloc:
6098 	kfree(intel_dig_port);
6099 
6100 	return;
6101 }
6102 
6103 #if 0
6104 void intel_dp_mst_suspend(struct drm_device *dev)
6105 {
6106 	struct drm_i915_private *dev_priv = dev->dev_private;
6107 	int i;
6108 
6109 	/* disable MST */
6110 	for (i = 0; i < I915_MAX_PORTS; i++) {
6111 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6112 		if (!intel_dig_port)
6113 			continue;
6114 
6115 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6116 			if (!intel_dig_port->dp.can_mst)
6117 				continue;
6118 			if (intel_dig_port->dp.is_mst)
6119 				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6120 		}
6121 	}
6122 }
6123 #endif
6124 
6125 void intel_dp_mst_resume(struct drm_device *dev)
6126 {
6127 	struct drm_i915_private *dev_priv = dev->dev_private;
6128 	int i;
6129 
6130 	for (i = 0; i < I915_MAX_PORTS; i++) {
6131 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6132 		if (!intel_dig_port)
6133 			continue;
6134 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6135 			int ret;
6136 
6137 			if (!intel_dig_port->dp.can_mst)
6138 				continue;
6139 
6140 			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6141 			if (ret != 0) {
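				/*
				 * Topology didn't come back; the status
				 * check will drop us out of MST mode.
				 */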
6142 				intel_dp_check_mst_status(&intel_dig_port->dp);
6143 			}
6144 		}
6145 	}
6146 }
6147