xref: /dragonfly/sys/dev/drm/i915/intel_dp.c (revision 38b5d46c)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27 
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <linux/notifier.h>
31 #include <drm/drmP.h>
32 #include <linux/slab.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_crtc.h>
35 #include <drm/drm_crtc_helper.h>
36 #include <drm/drm_edid.h>
37 #include "intel_drv.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 
41 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
42 
43 static int disable_aux_irq = 0;
44 TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
45 
46 /* Compliance test status bits  */
47 #define INTEL_DP_RESOLUTION_SHIFT_MASK	0
48 #define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 #define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
51 
52 struct dp_link_dpll {
53 	int clock;
54 	struct dpll dpll;
55 };
56 
57 static const struct dp_link_dpll gen4_dpll[] = {
58 	{ 162000,
59 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
60 	{ 270000,
61 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
62 };
63 
64 static const struct dp_link_dpll pch_dpll[] = {
65 	{ 162000,
66 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
67 	{ 270000,
68 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
69 };
70 
71 static const struct dp_link_dpll vlv_dpll[] = {
72 	{ 162000,
73 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
74 	{ 270000,
75 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
76 };
77 
78 /*
79  * CHV supports eDP 1.4, which allows additional link rates.
80  * Below we provide only the fixed rates, excluding the variable rates.
81  */
82 static const struct dp_link_dpll chv_dpll[] = {
83 	/*
84 	 * CHV requires programming fractional division for m2.
85 	 * m2 is stored in fixed-point format using the formula below:
86 	 * (m2_int << 22) | m2_fraction
87 	 */
88 	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
89 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
90 	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
91 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
92 	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
93 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
94 };
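/*
 * As a sanity check on the fixed-point encoding above:
 * 0x819999a >> 22 == 32 and 0x819999a & ((1 << 22) - 1) == 1677722,
 * i.e. m2 == 32 + 1677722/4194304 ~= 32.4, which matches the
 * m2_int/m2_fraction values quoted in the table comments.
 */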
95 
96 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
97 				  324000, 432000, 540000 };
98 static const int skl_rates[] = { 162000, 216000, 270000,
99 				  324000, 432000, 540000 };
100 static const int default_rates[] = { 162000, 270000, 540000 };
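/*
 * The rate tables above are link clocks in kHz; default_rates corresponds
 * to the standard RBR/HBR/HBR2 rates of 1.62, 2.7 and 5.4 Gbit/s per lane.
 */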
101 
102 /**
103  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104  * @intel_dp: DP struct
105  *
106  * If a CPU or PCH DP output is attached to an eDP panel, this function
107  * will return true, and false otherwise.
108  */
109 static bool is_edp(struct intel_dp *intel_dp)
110 {
111 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112 
113 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
114 }
115 
116 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
117 {
118 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
119 
120 	return intel_dig_port->base.base.dev;
121 }
122 
123 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
124 {
125 	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
126 }
127 
128 static void intel_dp_link_down(struct intel_dp *intel_dp);
129 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
130 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
131 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
132 static void vlv_steal_power_sequencer(struct drm_device *dev,
133 				      enum i915_pipe pipe);
134 
135 static unsigned int intel_dp_unused_lane_mask(int lane_count)
136 {
137 	return ~((1 << lane_count) - 1) & 0xf;
138 }
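/*
 * For example, lane_count == 2 gives ~((1 << 2) - 1) & 0xf == 0xc,
 * i.e. lanes 2 and 3 are reported as unused.
 */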
139 
140 static int
141 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
142 {
143 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
144 
145 	switch (max_link_bw) {
146 	case DP_LINK_BW_1_62:
147 	case DP_LINK_BW_2_7:
148 	case DP_LINK_BW_5_4:
149 		break;
150 	default:
151 		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
152 		     max_link_bw);
153 		max_link_bw = DP_LINK_BW_1_62;
154 		break;
155 	}
156 	return max_link_bw;
157 }
158 
159 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
160 {
161 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
162 	struct drm_device *dev = intel_dig_port->base.base.dev;
163 	u8 source_max, sink_max;
164 
165 	source_max = 4;
166 	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
167 	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
168 		source_max = 2;
169 
170 	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
171 
172 	return min(source_max, sink_max);
173 }
174 
175 /*
176  * The units on the numbers in the next two are... bizarre.  Examples will
177  * make it clearer; this one parallels an example in the eDP spec.
178  *
179  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
180  *
181  *     270000 * 1 * 8 / 10 == 216000
182  *
183  * The actual data capacity of that configuration is 2.16Gbit/s, so the
184  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
185  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
186  * 119000.  At 18bpp that's 2142000 kilobits per second.
187  *
188  * Thus the strange-looking division by 10 in intel_dp_link_required, to
189  * get the result in decakilobits instead of kilobits.
190  */
191 
192 static int
193 intel_dp_link_required(int pixel_clock, int bpp)
194 {
195 	return (pixel_clock * bpp + 9) / 10;
196 }
197 
198 static int
199 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
200 {
201 	return (max_link_clock * max_lanes * 8) / 10;
202 }
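/*
 * A worked example tying the two helpers together: one 2.7GHz lane gives
 * intel_dp_max_data_rate(270000, 1) == 216000, while 1680x1050R (119000
 * kHz) at 18bpp needs intel_dp_link_required(119000, 18) == 214200, so
 * that mode just fits. The "+ 9" makes the division round up.
 */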
203 
204 static enum drm_mode_status
205 intel_dp_mode_valid(struct drm_connector *connector,
206 		    struct drm_display_mode *mode)
207 {
208 	struct intel_dp *intel_dp = intel_attached_dp(connector);
209 	struct intel_connector *intel_connector = to_intel_connector(connector);
210 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
211 	int target_clock = mode->clock;
212 	int max_rate, mode_rate, max_lanes, max_link_clock;
213 
214 	if (is_edp(intel_dp) && fixed_mode) {
215 		if (mode->hdisplay > fixed_mode->hdisplay)
216 			return MODE_PANEL;
217 
218 		if (mode->vdisplay > fixed_mode->vdisplay)
219 			return MODE_PANEL;
220 
221 		target_clock = fixed_mode->clock;
222 	}
223 
224 	max_link_clock = intel_dp_max_link_rate(intel_dp);
225 	max_lanes = intel_dp_max_lane_count(intel_dp);
226 
227 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
228 	mode_rate = intel_dp_link_required(target_clock, 18);
229 
230 	if (mode_rate > max_rate)
231 		return MODE_CLOCK_HIGH;
232 
233 	if (mode->clock < 10000)
234 		return MODE_CLOCK_LOW;
235 
236 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
237 		return MODE_H_ILLEGAL;
238 
239 	return MODE_OK;
240 }
241 
242 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
243 {
244 	int	i;
245 	uint32_t v = 0;
246 
247 	if (src_bytes > 4)
248 		src_bytes = 4;
249 	for (i = 0; i < src_bytes; i++)
250 		v |= ((uint32_t) src[i]) << ((3-i) * 8);
251 	return v;
252 }
253 
254 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
255 {
256 	int i;
257 	if (dst_bytes > 4)
258 		dst_bytes = 4;
259 	for (i = 0; i < dst_bytes; i++)
260 		dst[i] = src >> ((3-i) * 8);
261 }
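/*
 * intel_dp_pack_aux() and intel_dp_unpack_aux() are inverses; bytes map
 * to the register MSB-first, so e.g. packing { 0x12, 0x34 } yields
 * 0x12340000, and unpacking 0x12340000 into two bytes returns them again.
 */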
262 
263 static void
264 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
265 				    struct intel_dp *intel_dp);
266 static void
267 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
268 					      struct intel_dp *intel_dp);
269 
270 static void pps_lock(struct intel_dp *intel_dp)
271 {
272 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
273 	struct intel_encoder *encoder = &intel_dig_port->base;
274 	struct drm_device *dev = encoder->base.dev;
275 	struct drm_i915_private *dev_priv = dev->dev_private;
276 	enum intel_display_power_domain power_domain;
277 
278 	/*
279 	 * See vlv_power_sequencer_reset() why we need
280 	 * a power domain reference here.
281 	 */
282 	power_domain = intel_display_port_aux_power_domain(encoder);
283 	intel_display_power_get(dev_priv, power_domain);
284 
285 	mutex_lock(&dev_priv->pps_mutex);
286 }
287 
288 static void pps_unlock(struct intel_dp *intel_dp)
289 {
290 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
291 	struct intel_encoder *encoder = &intel_dig_port->base;
292 	struct drm_device *dev = encoder->base.dev;
293 	struct drm_i915_private *dev_priv = dev->dev_private;
294 	enum intel_display_power_domain power_domain;
295 
296 	mutex_unlock(&dev_priv->pps_mutex);
297 
298 	power_domain = intel_display_port_aux_power_domain(encoder);
299 	intel_display_power_put(dev_priv, power_domain);
300 }
301 
302 static void
303 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
304 {
305 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
306 	struct drm_device *dev = intel_dig_port->base.base.dev;
307 	struct drm_i915_private *dev_priv = dev->dev_private;
308 	enum i915_pipe pipe = intel_dp->pps_pipe;
309 	bool pll_enabled, release_cl_override = false;
310 	enum dpio_phy phy = DPIO_PHY(pipe);
311 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
312 	uint32_t DP;
313 
314 	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
315 		 "skipping pipe %c power sequencer kick due to port %c being active\n",
316 		 pipe_name(pipe), port_name(intel_dig_port->port)))
317 		return;
318 
319 	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
320 		      pipe_name(pipe), port_name(intel_dig_port->port));
321 
322 	/* Preserve the BIOS-computed detected bit. This is
323 	 * supposed to be read-only.
324 	 */
325 	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
326 	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
327 	DP |= DP_PORT_WIDTH(1);
328 	DP |= DP_LINK_TRAIN_PAT_1;
329 
330 	if (IS_CHERRYVIEW(dev))
331 		DP |= DP_PIPE_SELECT_CHV(pipe);
332 	else if (pipe == PIPE_B)
333 		DP |= DP_PIPEB_SELECT;
334 
335 	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
336 
337 	/*
338 	 * The DPLL for the pipe must be enabled for this to work.
339 	 * So enable it temporarily if it's not already enabled.
340 	 */
341 	if (!pll_enabled) {
342 		release_cl_override = IS_CHERRYVIEW(dev) &&
343 			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
344 
345 		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
346 				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
347 	}
348 
349 	/*
350 	 * Similar magic as in intel_dp_enable_port().
351 	 * We _must_ do this port enable + disable trick
352 	 * to make this power sequencer lock onto the port.
353 	 * Otherwise even the VDD force bit won't work.
354 	 */
355 	I915_WRITE(intel_dp->output_reg, DP);
356 	POSTING_READ(intel_dp->output_reg);
357 
358 	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
359 	POSTING_READ(intel_dp->output_reg);
360 
361 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
362 	POSTING_READ(intel_dp->output_reg);
363 
364 	if (!pll_enabled) {
365 		vlv_force_pll_off(dev, pipe);
366 
367 		if (release_cl_override)
368 			chv_phy_powergate_ch(dev_priv, phy, ch, false);
369 	}
370 }
371 
372 static enum i915_pipe
373 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
374 {
375 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
376 	struct drm_device *dev = intel_dig_port->base.base.dev;
377 	struct drm_i915_private *dev_priv = dev->dev_private;
378 	struct intel_encoder *encoder;
379 	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
380 	enum i915_pipe pipe;
381 
382 	lockdep_assert_held(&dev_priv->pps_mutex);
383 
384 	/* We should never land here with regular DP ports */
385 	WARN_ON(!is_edp(intel_dp));
386 
387 	if (intel_dp->pps_pipe != INVALID_PIPE)
388 		return intel_dp->pps_pipe;
389 
390 	/*
391 	 * We don't have a power sequencer assigned yet.
392 	 * Pick one that's not used by another port.
393 	 */
394 	for_each_intel_encoder(dev, encoder) {
395 		struct intel_dp *tmp;
396 
397 		if (encoder->type != INTEL_OUTPUT_EDP)
398 			continue;
399 
400 		tmp = enc_to_intel_dp(&encoder->base);
401 
402 		if (tmp->pps_pipe != INVALID_PIPE)
403 			pipes &= ~(1 << tmp->pps_pipe);
404 	}
405 
406 	/*
407 	 * Didn't find one. This should not happen since there
408 	 * are two power sequencers and up to two eDP ports.
409 	 */
410 	if (WARN_ON(pipes == 0))
411 		pipe = PIPE_A;
412 	else
413 		pipe = ffs(pipes) - 1;
414 
415 	vlv_steal_power_sequencer(dev, pipe);
416 	intel_dp->pps_pipe = pipe;
417 
418 	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
419 		      pipe_name(intel_dp->pps_pipe),
420 		      port_name(intel_dig_port->port));
421 
422 	/* init power sequencer on this pipe and port */
423 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
424 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
425 
426 	/*
427 	 * Even vdd force doesn't work until we've made
428 	 * the power sequencer lock in on the port.
429 	 */
430 	vlv_power_sequencer_kick(intel_dp);
431 
432 	return intel_dp->pps_pipe;
433 }
434 
435 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
436 			       enum i915_pipe pipe);
437 
438 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
439 			       enum i915_pipe pipe)
440 {
441 	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
442 }
443 
444 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
445 				enum i915_pipe pipe)
446 {
447 	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
448 }
449 
450 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
451 			 enum i915_pipe pipe)
452 {
453 	return true;
454 }
455 
456 static enum i915_pipe
457 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
458 		     enum port port,
459 		     vlv_pipe_check pipe_check)
460 {
461 	enum i915_pipe pipe;
462 
463 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
464 		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
465 			PANEL_PORT_SELECT_MASK;
466 
467 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
468 			continue;
469 
470 		if (!pipe_check(dev_priv, pipe))
471 			continue;
472 
473 		return pipe;
474 	}
475 
476 	return INVALID_PIPE;
477 }
478 
479 static void
480 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
481 {
482 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
483 	struct drm_device *dev = intel_dig_port->base.base.dev;
484 	struct drm_i915_private *dev_priv = dev->dev_private;
485 	enum port port = intel_dig_port->port;
486 
487 	lockdep_assert_held(&dev_priv->pps_mutex);
488 
489 	/* try to find a pipe with this port selected */
490 	/* first pick one where the panel is on */
491 	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
492 						  vlv_pipe_has_pp_on);
493 	/* didn't find one? pick one where vdd is on */
494 	if (intel_dp->pps_pipe == INVALID_PIPE)
495 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
496 							  vlv_pipe_has_vdd_on);
497 	/* didn't find one? pick one with just the correct port */
498 	if (intel_dp->pps_pipe == INVALID_PIPE)
499 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
500 							  vlv_pipe_any);
501 
502 	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
503 	if (intel_dp->pps_pipe == INVALID_PIPE) {
504 		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
505 			      port_name(port));
506 		return;
507 	}
508 
509 	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
510 		      port_name(port), pipe_name(intel_dp->pps_pipe));
511 
512 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
513 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
514 }
515 
516 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
517 {
518 	struct drm_device *dev = dev_priv->dev;
519 	struct intel_encoder *encoder;
520 
521 	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
522 		return;
523 
524 	/*
525 	 * We can't grab pps_mutex here due to deadlock with power_domain
526 	 * mutex when power_domain functions are called while holding pps_mutex.
527 	 * That also means that in order to use pps_pipe the code needs to
528 	 * hold both a power domain reference and pps_mutex, and the power domain
529 	 * reference get/put must be done while _not_ holding pps_mutex.
530 	 * pps_{lock,unlock}() do these steps in the correct order, so one
531 	 * should use them always.
532 	 */
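	/*
	 * IOW, a sketch of the required pattern:
	 *
	 *	pps_lock(intel_dp);	<- power domain get, then mutex_lock
	 *	... touch PPS state ...
	 *	pps_unlock(intel_dp);	<- mutex_unlock, then power domain put
	 */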
533 
534 	for_each_intel_encoder(dev, encoder) {
535 		struct intel_dp *intel_dp;
536 
537 		if (encoder->type != INTEL_OUTPUT_EDP)
538 			continue;
539 
540 		intel_dp = enc_to_intel_dp(&encoder->base);
541 		intel_dp->pps_pipe = INVALID_PIPE;
542 	}
543 }
544 
545 static i915_reg_t
546 _pp_ctrl_reg(struct intel_dp *intel_dp)
547 {
548 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
549 
550 	if (IS_BROXTON(dev))
551 		return BXT_PP_CONTROL(0);
552 	else if (HAS_PCH_SPLIT(dev))
553 		return PCH_PP_CONTROL;
554 	else
555 		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
556 }
557 
558 static i915_reg_t
559 _pp_stat_reg(struct intel_dp *intel_dp)
560 {
561 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
562 
563 	if (IS_BROXTON(dev))
564 		return BXT_PP_STATUS(0);
565 	else if (HAS_PCH_SPLIT(dev))
566 		return PCH_PP_STATUS;
567 	else
568 		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
569 }
570 
571 /* Reboot notifier handler to shut down panel power, to guarantee T12 timing.
572    This function is only applicable when the panel PM state is not to be tracked. */
573 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
574 			      void *unused)
575 {
576 	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
577 						 edp_notifier);
578 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
579 	struct drm_i915_private *dev_priv = dev->dev_private;
580 
581 #if 0
582 	if (!is_edp(intel_dp) || code != SYS_RESTART)
583 		return 0;
584 #endif
585 
586 	pps_lock(intel_dp);
587 
588 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
589 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
590 		i915_reg_t pp_ctrl_reg, pp_div_reg;
591 		u32 pp_div;
592 
593 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
594 		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
595 		pp_div = I915_READ(pp_div_reg);
596 		pp_div &= PP_REFERENCE_DIVIDER_MASK;
597 
598 		/* 0x1F write to PP_DIV_REG sets max cycle delay */
599 		I915_WRITE(pp_div_reg, pp_div | 0x1F);
600 		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
601 		msleep(intel_dp->panel_power_cycle_delay);
602 	}
603 
604 	pps_unlock(intel_dp);
605 
606 	return 0;
607 }
608 
609 static bool edp_have_panel_power(struct intel_dp *intel_dp)
610 {
611 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
612 	struct drm_i915_private *dev_priv = dev->dev_private;
613 
614 	lockdep_assert_held(&dev_priv->pps_mutex);
615 
616 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
617 	    intel_dp->pps_pipe == INVALID_PIPE)
618 		return false;
619 
620 	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
621 }
622 
623 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
624 {
625 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
626 	struct drm_i915_private *dev_priv = dev->dev_private;
627 
628 	lockdep_assert_held(&dev_priv->pps_mutex);
629 
630 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
631 	    intel_dp->pps_pipe == INVALID_PIPE)
632 		return false;
633 
634 	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
635 }
636 
637 static void
638 intel_dp_check_edp(struct intel_dp *intel_dp)
639 {
640 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
641 	struct drm_i915_private *dev_priv = dev->dev_private;
642 
643 	if (!is_edp(intel_dp))
644 		return;
645 
646 	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
647 		WARN(1, "eDP powered off while attempting aux channel communication.\n");
648 		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
649 			      I915_READ(_pp_stat_reg(intel_dp)),
650 			      I915_READ(_pp_ctrl_reg(intel_dp)));
651 	}
652 }
653 
654 static uint32_t
655 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
656 {
657 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
658 	struct drm_device *dev = intel_dig_port->base.base.dev;
659 	struct drm_i915_private *dev_priv = dev->dev_private;
660 	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
661 	uint32_t status;
662 	bool done;
663 
664 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
665 	if (has_aux_irq)
666 		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
667 					  msecs_to_jiffies_timeout(10));
668 	else
669 		done = wait_for_atomic(C, 10) == 0;
670 	if (!done)
671 		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
672 			  has_aux_irq);
673 #undef C
674 
675 	return status;
676 }
677 
678 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
679 {
680 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
681 	struct drm_device *dev = intel_dig_port->base.base.dev;
682 
683 	/*
684 	 * The clock divider is based on hrawclk and would like to run at
685 	 * 2MHz.  So, take the hrawclk value, divide by 2, and use that.
686 	 */
687 	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
688 }
689 
690 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
691 {
692 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
693 	struct drm_device *dev = intel_dig_port->base.base.dev;
694 	struct drm_i915_private *dev_priv = dev->dev_private;
695 
696 	if (index)
697 		return 0;
698 
699 	if (intel_dig_port->port == PORT_A) {
700 		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
701 
702 	} else {
703 		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
704 	}
705 }
706 
707 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
708 {
709 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
710 	struct drm_device *dev = intel_dig_port->base.base.dev;
711 	struct drm_i915_private *dev_priv = dev->dev_private;
712 
713 	if (intel_dig_port->port == PORT_A) {
714 		if (index)
715 			return 0;
716 		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
717 	} else if (HAS_PCH_LPT_H(dev_priv)) {
718 		/* Workaround for non-ULT HSW */
719 		switch (index) {
720 		case 0: return 63;
721 		case 1: return 72;
722 		default: return 0;
723 		}
724 	} else  {
725 		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
726 	}
727 }
728 
729 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
730 {
731 	return index ? 0 : 100;
732 }
733 
734 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
735 {
736 	/*
737 	 * SKL doesn't need us to program the AUX clock divider (Hardware will
738 	 * derive the clock from CDCLK automatically). We still implement the
739 	 * get_aux_clock_divider vfunc to plug into the existing code.
740 	 */
741 	return index ? 0 : 1;
742 }
743 
744 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
745 				      bool has_aux_irq,
746 				      int send_bytes,
747 				      uint32_t aux_clock_divider)
748 {
749 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
750 	struct drm_device *dev = intel_dig_port->base.base.dev;
751 	uint32_t precharge, timeout;
752 
753 	if (IS_GEN6(dev))
754 		precharge = 3;
755 	else
756 		precharge = 5;
757 
758 	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
759 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
760 	else
761 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
762 
763 	return DP_AUX_CH_CTL_SEND_BUSY |
764 	       DP_AUX_CH_CTL_DONE |
765 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
766 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
767 	       timeout |
768 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
769 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
770 	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
771 	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
772 }
773 
774 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
775 				      bool has_aux_irq,
776 				      int send_bytes,
777 				      uint32_t unused)
778 {
779 	return DP_AUX_CH_CTL_SEND_BUSY |
780 	       DP_AUX_CH_CTL_DONE |
781 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
782 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
783 	       DP_AUX_CH_CTL_TIME_OUT_1600us |
784 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
785 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
786 	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
787 }
788 
789 static int
790 intel_dp_aux_ch(struct intel_dp *intel_dp,
791 		const uint8_t *send, int send_bytes,
792 		uint8_t *recv, int recv_size)
793 {
794 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
795 	struct drm_device *dev = intel_dig_port->base.base.dev;
796 	struct drm_i915_private *dev_priv = dev->dev_private;
797 	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
798 	uint32_t aux_clock_divider;
799 	int i, ret, recv_bytes;
800 	uint32_t status;
801 	int try, clock = 0;
802 #ifdef __DragonFly__
803 	bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
804 #else
805 	bool has_aux_irq = HAS_AUX_IRQ(dev);
806 #endif
807 	bool vdd;
808 
809 	pps_lock(intel_dp);
810 
811 	/*
812 	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
813 	 * In such cases we want to leave VDD enabled and it's up to upper layers
814 	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
815 	 * ourselves.
816 	 */
817 	vdd = edp_panel_vdd_on(intel_dp);
818 
819 	/* dp aux is extremely sensitive to irq latency, hence request the
820 	 * lowest possible wakeup latency and so prevent the cpu from going into
821 	 * deep sleep states.
822 	 */
823 	pm_qos_update_request(&dev_priv->pm_qos, 0);
824 
825 	intel_dp_check_edp(intel_dp);
826 
827 	/* Try to wait for any previous AUX channel activity */
828 	for (try = 0; try < 3; try++) {
829 		status = I915_READ_NOTRACE(ch_ctl);
830 		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
831 			break;
832 		msleep(1);
833 	}
834 
835 	if (try == 3) {
836 		static u32 last_status = -1;
837 		const u32 status = I915_READ(ch_ctl);
838 
839 		if (status != last_status) {
840 			WARN(1, "dp_aux_ch not started status 0x%08x\n",
841 			     status);
842 			last_status = status;
843 		}
844 
845 		ret = -EBUSY;
846 		goto out;
847 	}
848 
849 	/* Only 5 data registers! */
850 	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
851 		ret = -E2BIG;
852 		goto out;
853 	}
854 
855 	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
856 		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
857 							  has_aux_irq,
858 							  send_bytes,
859 							  aux_clock_divider);
860 
861 		/* Must try at least 3 times according to DP spec */
862 		for (try = 0; try < 5; try++) {
863 			/* Load the send data into the aux channel data registers */
864 			for (i = 0; i < send_bytes; i += 4)
865 				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
866 					   intel_dp_pack_aux(send + i,
867 							     send_bytes - i));
868 
869 			/* Send the command and wait for it to complete */
870 			I915_WRITE(ch_ctl, send_ctl);
871 
872 			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
873 
874 			/* Clear done status and any errors */
875 			I915_WRITE(ch_ctl,
876 				   status |
877 				   DP_AUX_CH_CTL_DONE |
878 				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
879 				   DP_AUX_CH_CTL_RECEIVE_ERROR);
880 
881 			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
882 				continue;
883 
884 			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
885 			 *   400us delay required for errors and timeouts
886 			 *   Timeout errors from the HW already meet this
887 			 *   requirement so skip to next iteration
888 			 */
889 			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
890 				usleep_range(400, 500);
891 				continue;
892 			}
893 			if (status & DP_AUX_CH_CTL_DONE)
894 				goto done;
895 		}
896 	}
897 
898 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
899 		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
900 		ret = -EBUSY;
901 		goto out;
902 	}
903 
904 done:
905 	/* Check for timeout or receive error.
906 	 * Timeouts occur when the sink is not connected
907 	 */
908 	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
909 		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
910 		ret = -EIO;
911 		goto out;
912 	}
913 
914 	/* Timeouts occur when the device isn't connected, so they're
915 	 * "normal" -- don't fill the kernel log with these */
916 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
917 		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
918 		ret = -ETIMEDOUT;
919 		goto out;
920 	}
921 
922 	/* Unload any bytes sent back from the other side */
923 	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
924 		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
925 
926 	/*
927 	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
928 	 * We have no idea what happened, so we return -EBUSY so the
929 	 * drm layer takes care of the necessary retries.
930 	 */
931 	if (recv_bytes == 0 || recv_bytes > 20) {
932 		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
933 			      recv_bytes);
934 		/*
935 		 * FIXME: This patch was created on top of a series that
936 		 * organizes the retries at the drm level. There, EBUSY should
937 		 * also take care of the 1ms wait before retrying.
938 		 * That aux retry re-org is still needed, and once it is
939 		 * merged we can remove this sleep from here.
940 		 */
941 		usleep_range(1000, 1500);
942 		ret = -EBUSY;
943 		goto out;
944 	}
945 
946 	if (recv_bytes > recv_size)
947 		recv_bytes = recv_size;
948 
949 	for (i = 0; i < recv_bytes; i += 4)
950 		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
951 				    recv + i, recv_bytes - i);
952 
953 	ret = recv_bytes;
954 out:
955 	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
956 
957 	if (vdd)
958 		edp_panel_vdd_off(intel_dp, false);
959 
960 	pps_unlock(intel_dp);
961 
962 	return ret;
963 }
964 
965 #define BARE_ADDRESS_SIZE	3
966 #define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
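/*
 * The 4-byte header built below follows the DP AUX framing: request nibble,
 * 20-bit address, then length minus one. E.g. a one-byte native read of
 * DPCD address 0 goes out as txbuf[] = { 0x90, 0x00, 0x00, 0x00 }
 * (assuming the usual DP_AUX_NATIVE_READ == 0x9 encoding).
 */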
967 static ssize_t
968 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
969 {
970 	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
971 	uint8_t txbuf[20], rxbuf[20];
972 	size_t txsize, rxsize;
973 	int ret;
974 
975 	txbuf[0] = (msg->request << 4) |
976 		((msg->address >> 16) & 0xf);
977 	txbuf[1] = (msg->address >> 8) & 0xff;
978 	txbuf[2] = msg->address & 0xff;
979 	txbuf[3] = msg->size - 1;
980 
981 	switch (msg->request & ~DP_AUX_I2C_MOT) {
982 	case DP_AUX_NATIVE_WRITE:
983 	case DP_AUX_I2C_WRITE:
984 	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
985 		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
986 		rxsize = 2; /* 0 or 1 data bytes */
987 
988 		if (WARN_ON(txsize > 20))
989 			return -E2BIG;
990 
991 		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
992 
993 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
994 		if (ret > 0) {
995 			msg->reply = rxbuf[0] >> 4;
996 
997 			if (ret > 1) {
998 				/* Number of bytes written in a short write. */
999 				ret = clamp_t(int, rxbuf[1], 0, msg->size);
1000 			} else {
1001 				/* Return payload size. */
1002 				ret = msg->size;
1003 			}
1004 		}
1005 		break;
1006 
1007 	case DP_AUX_NATIVE_READ:
1008 	case DP_AUX_I2C_READ:
1009 		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1010 		rxsize = msg->size + 1;
1011 
1012 		if (WARN_ON(rxsize > 20))
1013 			return -E2BIG;
1014 
1015 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1016 		if (ret > 0) {
1017 			msg->reply = rxbuf[0] >> 4;
1018 			/*
1019 			 * Assume happy day, and copy the data. The caller is
1020 			 * expected to check msg->reply before touching it.
1021 			 *
1022 			 * Return payload size.
1023 			 */
1024 			ret--;
1025 			memcpy(msg->buffer, rxbuf + 1, ret);
1026 		}
1027 		break;
1028 
1029 	default:
1030 		ret = -EINVAL;
1031 		break;
1032 	}
1033 
1034 	return ret;
1035 }
1036 
1037 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1038 				       enum port port)
1039 {
1040 	switch (port) {
1041 	case PORT_B:
1042 	case PORT_C:
1043 	case PORT_D:
1044 		return DP_AUX_CH_CTL(port);
1045 	default:
1046 		MISSING_CASE(port);
1047 		return DP_AUX_CH_CTL(PORT_B);
1048 	}
1049 }
1050 
1051 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1052 					enum port port, int index)
1053 {
1054 	switch (port) {
1055 	case PORT_B:
1056 	case PORT_C:
1057 	case PORT_D:
1058 		return DP_AUX_CH_DATA(port, index);
1059 	default:
1060 		MISSING_CASE(port);
1061 		return DP_AUX_CH_DATA(PORT_B, index);
1062 	}
1063 }
1064 
1065 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1066 				       enum port port)
1067 {
1068 	switch (port) {
1069 	case PORT_A:
1070 		return DP_AUX_CH_CTL(port);
1071 	case PORT_B:
1072 	case PORT_C:
1073 	case PORT_D:
1074 		return PCH_DP_AUX_CH_CTL(port);
1075 	default:
1076 		MISSING_CASE(port);
1077 		return DP_AUX_CH_CTL(PORT_A);
1078 	}
1079 }
1080 
1081 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1082 					enum port port, int index)
1083 {
1084 	switch (port) {
1085 	case PORT_A:
1086 		return DP_AUX_CH_DATA(port, index);
1087 	case PORT_B:
1088 	case PORT_C:
1089 	case PORT_D:
1090 		return PCH_DP_AUX_CH_DATA(port, index);
1091 	default:
1092 		MISSING_CASE(port);
1093 		return DP_AUX_CH_DATA(PORT_A, index);
1094 	}
1095 }
1096 
1097 /*
1098  * On SKL we don't have Aux for port E so we rely
1099  * on VBT to set a proper alternate aux channel.
1100  */
1101 static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1102 {
1103 	const struct ddi_vbt_port_info *info =
1104 		&dev_priv->vbt.ddi_port_info[PORT_E];
1105 
1106 	switch (info->alternate_aux_channel) {
1107 	case DP_AUX_A:
1108 		return PORT_A;
1109 	case DP_AUX_B:
1110 		return PORT_B;
1111 	case DP_AUX_C:
1112 		return PORT_C;
1113 	case DP_AUX_D:
1114 		return PORT_D;
1115 	default:
1116 		MISSING_CASE(info->alternate_aux_channel);
1117 		return PORT_A;
1118 	}
1119 }
1120 
1121 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1122 				       enum port port)
1123 {
1124 	if (port == PORT_E)
1125 		port = skl_porte_aux_port(dev_priv);
1126 
1127 	switch (port) {
1128 	case PORT_A:
1129 	case PORT_B:
1130 	case PORT_C:
1131 	case PORT_D:
1132 		return DP_AUX_CH_CTL(port);
1133 	default:
1134 		MISSING_CASE(port);
1135 		return DP_AUX_CH_CTL(PORT_A);
1136 	}
1137 }
1138 
1139 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1140 					enum port port, int index)
1141 {
1142 	if (port == PORT_E)
1143 		port = skl_porte_aux_port(dev_priv);
1144 
1145 	switch (port) {
1146 	case PORT_A:
1147 	case PORT_B:
1148 	case PORT_C:
1149 	case PORT_D:
1150 		return DP_AUX_CH_DATA(port, index);
1151 	default:
1152 		MISSING_CASE(port);
1153 		return DP_AUX_CH_DATA(PORT_A, index);
1154 	}
1155 }
1156 
1157 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1158 					 enum port port)
1159 {
1160 	if (INTEL_INFO(dev_priv)->gen >= 9)
1161 		return skl_aux_ctl_reg(dev_priv, port);
1162 	else if (HAS_PCH_SPLIT(dev_priv))
1163 		return ilk_aux_ctl_reg(dev_priv, port);
1164 	else
1165 		return g4x_aux_ctl_reg(dev_priv, port);
1166 }
1167 
1168 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1169 					  enum port port, int index)
1170 {
1171 	if (INTEL_INFO(dev_priv)->gen >= 9)
1172 		return skl_aux_data_reg(dev_priv, port, index);
1173 	else if (HAS_PCH_SPLIT(dev_priv))
1174 		return ilk_aux_data_reg(dev_priv, port, index);
1175 	else
1176 		return g4x_aux_data_reg(dev_priv, port, index);
1177 }
1178 
1179 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1180 {
1181 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1182 	enum port port = dp_to_dig_port(intel_dp)->port;
1183 	int i;
1184 
1185 	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1186 	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1187 		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1188 }
1189 
1190 static void
1191 intel_dp_aux_fini(struct intel_dp *intel_dp)
1192 {
1193 	drm_dp_aux_unregister(&intel_dp->aux);
1194 	kfree(intel_dp->aux.name);
1195 }
1196 
1197 static int
1198 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1199 {
1200 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1201 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1202 	enum port port = intel_dig_port->port;
1203 	int ret;
1204 
1205 	intel_aux_reg_init(intel_dp);
1206 
1207 	intel_dp->aux.name = drm_asprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1208 	if (!intel_dp->aux.name)
1209 		return -ENOMEM;
1210 
1211 	intel_dp->aux.dev = dev->dev;
1212 	intel_dp->aux.transfer = intel_dp_aux_transfer;
1213 
1214 #if 0
1215 	DRM_DEBUG_KMS("registering %s bus for %s\n",
1216 		      intel_dp->aux.name,
1217 		      connector->base.kdev->kobj.name);
1218 #endif
1219 
1220 	ret = drm_dp_aux_register(&intel_dp->aux);
1221 	if (ret < 0) {
1222 		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1223 			  intel_dp->aux.name, ret);
1224 		kfree(intel_dp->aux.name);
1225 		return ret;
1226 	}
1227 
1228 #if 0
1229 	ret = sysfs_create_link(&connector->base.kdev->kobj,
1230 				&intel_dp->aux.ddc.dev.kobj,
1231 				intel_dp->aux.ddc.dev.kobj.name);
1232 	if (ret < 0) {
1233 		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1234 			  intel_dp->aux.name, ret);
1235 		intel_dp_aux_fini(intel_dp);
1236 		return ret;
1237 	}
1238 #endif
1239 
1240 	return 0;
1241 }
1242 
1243 static void
1244 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1245 {
1246 #if 0
1247 	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1248 
1249 	if (!intel_connector->mst_port)
1250 		sysfs_remove_link(&intel_connector->base.kdev->kobj,
1251 				  intel_dp->aux.ddc.dev.kobj.name);
1252 #endif
1253 	intel_connector_unregister(intel_connector);
1254 }
1255 
1256 static void
1257 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1258 {
1259 	u32 ctrl1;
1260 
1261 	memset(&pipe_config->dpll_hw_state, 0,
1262 	       sizeof(pipe_config->dpll_hw_state));
1263 
1264 	pipe_config->ddi_pll_sel = SKL_DPLL0;
1265 	pipe_config->dpll_hw_state.cfgcr1 = 0;
1266 	pipe_config->dpll_hw_state.cfgcr2 = 0;
1267 
1268 	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1269 	switch (pipe_config->port_clock / 2) {
1270 	case 81000:
1271 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1272 					      SKL_DPLL0);
1273 		break;
1274 	case 135000:
1275 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1276 					      SKL_DPLL0);
1277 		break;
1278 	case 270000:
1279 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1280 					      SKL_DPLL0);
1281 		break;
1282 	case 162000:
1283 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1284 					      SKL_DPLL0);
1285 		break;
1286 	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
1287 	 * results in a CDCLK change. Need to handle the change of CDCLK by
1288 	 * disabling pipes and re-enabling them. */
1289 	case 108000:
1290 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1291 					      SKL_DPLL0);
1292 		break;
1293 	case 216000:
1294 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1295 					      SKL_DPLL0);
1296 		break;
1297 
1298 	}
1299 	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1300 }
1301 
1302 void
1303 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1304 {
1305 	memset(&pipe_config->dpll_hw_state, 0,
1306 	       sizeof(pipe_config->dpll_hw_state));
1307 
1308 	switch (pipe_config->port_clock / 2) {
1309 	case 81000:
1310 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1311 		break;
1312 	case 135000:
1313 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1314 		break;
1315 	case 270000:
1316 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1317 		break;
1318 	}
1319 }
1320 
1321 static int
1322 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1323 {
1324 	if (intel_dp->num_sink_rates) {
1325 		*sink_rates = intel_dp->sink_rates;
1326 		return intel_dp->num_sink_rates;
1327 	}
1328 
1329 	*sink_rates = default_rates;
1330 
1331 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1332 }
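/*
 * The return value above relies on the DPCD encoding: DP_LINK_BW_1_62
 * (0x06), DP_LINK_BW_2_7 (0x0a) and DP_LINK_BW_5_4 (0x14) shift right by
 * three to 0, 1 and 2, so "+ 1" gives the number of leading default_rates
 * entries the sink supports.
 */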
1333 
1334 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1335 {
1336 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1337 	struct drm_device *dev = dig_port->base.base.dev;
1338 
1339 	/* WaDisableHBR2:skl */
1340 	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1341 		return false;
1342 
1343 	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1344 	    (INTEL_INFO(dev)->gen >= 9))
1345 		return true;
1346 	else
1347 		return false;
1348 }
1349 
1350 static int
1351 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1352 {
1353 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1354 	struct drm_device *dev = dig_port->base.base.dev;
1355 	int size;
1356 
1357 	if (IS_BROXTON(dev)) {
1358 		*source_rates = bxt_rates;
1359 		size = ARRAY_SIZE(bxt_rates);
1360 	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1361 		*source_rates = skl_rates;
1362 		size = ARRAY_SIZE(skl_rates);
1363 	} else {
1364 		*source_rates = default_rates;
1365 		size = ARRAY_SIZE(default_rates);
1366 	}
1367 
1368 	/* This depends on the fact that 5.4 is the last value in the array */
1369 	if (!intel_dp_source_supports_hbr2(intel_dp))
1370 		size--;
1371 
1372 	return size;
1373 }
1374 
1375 static void
1376 intel_dp_set_clock(struct intel_encoder *encoder,
1377 		   struct intel_crtc_state *pipe_config)
1378 {
1379 	struct drm_device *dev = encoder->base.dev;
1380 	const struct dp_link_dpll *divisor = NULL;
1381 	int i, count = 0;
1382 
1383 	if (IS_G4X(dev)) {
1384 		divisor = gen4_dpll;
1385 		count = ARRAY_SIZE(gen4_dpll);
1386 	} else if (HAS_PCH_SPLIT(dev)) {
1387 		divisor = pch_dpll;
1388 		count = ARRAY_SIZE(pch_dpll);
1389 	} else if (IS_CHERRYVIEW(dev)) {
1390 		divisor = chv_dpll;
1391 		count = ARRAY_SIZE(chv_dpll);
1392 	} else if (IS_VALLEYVIEW(dev)) {
1393 		divisor = vlv_dpll;
1394 		count = ARRAY_SIZE(vlv_dpll);
1395 	}
1396 
1397 	if (divisor && count) {
1398 		for (i = 0; i < count; i++) {
1399 			if (pipe_config->port_clock == divisor[i].clock) {
1400 				pipe_config->dpll = divisor[i].dpll;
1401 				pipe_config->clock_set = true;
1402 				break;
1403 			}
1404 		}
1405 	}
1406 }
1407 
1408 static int intersect_rates(const int *source_rates, int source_len,
1409 			   const int *sink_rates, int sink_len,
1410 			   int *common_rates)
1411 {
1412 	int i = 0, j = 0, k = 0;
1413 
1414 	while (i < source_len && j < sink_len) {
1415 		if (source_rates[i] == sink_rates[j]) {
1416 			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1417 				return k;
1418 			common_rates[k] = source_rates[i];
1419 			++k;
1420 			++i;
1421 			++j;
1422 		} else if (source_rates[i] < sink_rates[j]) {
1423 			++i;
1424 		} else {
1425 			++j;
1426 		}
1427 	}
1428 	return k;
1429 }
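/*
 * This is a standard intersection of two ascending lists; e.g. intersecting
 * { 162000, 270000, 540000 } with { 162000, 270000 } stores { 162000,
 * 270000 } in common_rates and returns 2.
 */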
1430 
1431 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1432 				 int *common_rates)
1433 {
1434 	const int *source_rates, *sink_rates;
1435 	int source_len, sink_len;
1436 
1437 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1438 	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1439 
1440 	return intersect_rates(source_rates, source_len,
1441 			       sink_rates, sink_len,
1442 			       common_rates);
1443 }
1444 
1445 static void snprintf_int_array(char *str, size_t len,
1446 			       const int *array, int nelem)
1447 {
1448 	int i;
1449 
1450 	str[0] = '\0';
1451 
1452 	for (i = 0; i < nelem; i++) {
1453 		int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1454 		if (r >= len)
1455 			return;
1456 		str += r;
1457 		len -= r;
1458 	}
1459 }
1460 
1461 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1462 {
1463 	const int *source_rates, *sink_rates;
1464 	int source_len, sink_len, common_len;
1465 	int common_rates[DP_MAX_SUPPORTED_RATES];
1466 	char str[128]; /* FIXME: too big for stack? */
1467 
1468 	if ((drm_debug & DRM_UT_KMS) == 0)
1469 		return;
1470 
1471 	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1472 	snprintf_int_array(str, sizeof(str), source_rates, source_len);
1473 	DRM_DEBUG_KMS("source rates: %s\n", str);
1474 
1475 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1476 	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1477 	DRM_DEBUG_KMS("sink rates: %s\n", str);
1478 
1479 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1480 	snprintf_int_array(str, sizeof(str), common_rates, common_len);
1481 	DRM_DEBUG_KMS("common rates: %s\n", str);
1482 }
1483 
1484 static int rate_to_index(int find, const int *rates)
1485 {
1486 	int i = 0;
1487 
1488 	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1489 		if (find == rates[i])
1490 			break;
1491 
1492 	return i;
1493 }
1494 
1495 int
1496 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1497 {
1498 	int rates[DP_MAX_SUPPORTED_RATES] = {};
1499 	int len;
1500 
1501 	len = intel_dp_common_rates(intel_dp, rates);
1502 	if (WARN_ON(len <= 0))
1503 		return 162000;
1504 
1505 	return rates[rate_to_index(0, rates) - 1];
1506 }
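/*
 * Note: rates[] was zero-initialized and common rates are sorted in
 * ascending order, so rate_to_index(0, rates) lands on the first unused
 * slot (== len) and rates[len - 1] above is the highest common rate.
 */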
1507 
1508 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1509 {
1510 	return rate_to_index(rate, intel_dp->sink_rates);
1511 }
1512 
1513 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1514 			   uint8_t *link_bw, uint8_t *rate_select)
1515 {
1516 	if (intel_dp->num_sink_rates) {
1517 		*link_bw = 0;
1518 		*rate_select =
1519 			intel_dp_rate_select(intel_dp, port_clock);
1520 	} else {
1521 		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1522 		*rate_select = 0;
1523 	}
1524 }
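/*
 * For instance, a sink without a sink_rates table asking for a 270000 kHz
 * link gets *link_bw = drm_dp_link_rate_to_bw_code(270000) == DP_LINK_BW_2_7
 * and *rate_select = 0, i.e. the classic link-bw style of link training.
 */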
1525 
1526 bool
1527 intel_dp_compute_config(struct intel_encoder *encoder,
1528 			struct intel_crtc_state *pipe_config)
1529 {
1530 	struct drm_device *dev = encoder->base.dev;
1531 	struct drm_i915_private *dev_priv = dev->dev_private;
1532 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1533 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1534 	enum port port = dp_to_dig_port(intel_dp)->port;
1535 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1536 	struct intel_connector *intel_connector = intel_dp->attached_connector;
1537 	int lane_count, clock;
1538 	int min_lane_count = 1;
1539 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1540 	/* Conveniently, the link BW constants become indices with a shift...*/
1541 	int min_clock = 0;
1542 	int max_clock;
1543 	int bpp, mode_rate;
1544 	int link_avail, link_clock;
1545 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1546 	int common_len;
1547 	uint8_t link_bw, rate_select;
1548 
1549 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1550 
1551 	/* No common link rates between source and sink */
1552 	WARN_ON(common_len <= 0);
1553 
1554 	max_clock = common_len - 1;
1555 
1556 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1557 		pipe_config->has_pch_encoder = true;
1558 
1559 	pipe_config->has_dp_encoder = true;
1560 	pipe_config->has_drrs = false;
1561 	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1562 
1563 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1564 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1565 				       adjusted_mode);
1566 
1567 		if (INTEL_INFO(dev)->gen >= 9) {
1568 			int ret;
1569 			ret = skl_update_scaler_crtc(pipe_config);
1570 			if (ret)
1571 				return false;
1572 		}
1573 
1574 		if (HAS_GMCH_DISPLAY(dev))
1575 			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1576 						 intel_connector->panel.fitting_mode);
1577 		else
1578 			intel_pch_panel_fitting(intel_crtc, pipe_config,
1579 						intel_connector->panel.fitting_mode);
1580 	}
1581 
1582 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1583 		return false;
1584 
1585 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1586 		      "max bw %d pixel clock %iKHz\n",
1587 		      max_lane_count, common_rates[max_clock],
1588 		      adjusted_mode->crtc_clock);
1589 
1590 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1591 	 * bpc in between. */
1592 	bpp = pipe_config->pipe_bpp;
1593 	if (is_edp(intel_dp)) {
1594 
1595 		/* Get bpp from VBT only for panels that don't have bpp in EDID */
1596 		if (intel_connector->base.display_info.bpc == 0 &&
1597 			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1598 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1599 				      dev_priv->vbt.edp_bpp);
1600 			bpp = dev_priv->vbt.edp_bpp;
1601 		}
1602 
1603 		/*
1604 		 * Use the maximum clock and number of lanes the eDP panel
1605 		 * advertises being capable of. The panels are generally
1606 		 * designed to support only a single clock and lane
1607 		 * configuration, and typically these values correspond to the
1608 		 * native resolution of the panel.
1609 		 */
1610 		min_lane_count = max_lane_count;
1611 		min_clock = max_clock;
1612 	}
1613 
1614 	for (; bpp >= 6*3; bpp -= 2*3) {
1615 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1616 						   bpp);
1617 
1618 		for (clock = min_clock; clock <= max_clock; clock++) {
1619 			for (lane_count = min_lane_count;
1620 				lane_count <= max_lane_count;
1621 				lane_count <<= 1) {
1622 
1623 				link_clock = common_rates[clock];
1624 				link_avail = intel_dp_max_data_rate(link_clock,
1625 								    lane_count);
1626 
1627 				if (mode_rate <= link_avail) {
1628 					goto found;
1629 				}
1630 			}
1631 		}
1632 	}
1633 
1634 	return false;
1635 
1636 found:
1637 	if (intel_dp->color_range_auto) {
1638 		/*
1639 		 * See:
1640 		 * CEA-861-E - 5.1 Default Encoding Parameters
1641 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1642 		 */
1643 		pipe_config->limited_color_range =
1644 			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1645 	} else {
1646 		pipe_config->limited_color_range =
1647 			intel_dp->limited_color_range;
1648 	}
1649 
1650 	pipe_config->lane_count = lane_count;
1651 
1652 	pipe_config->pipe_bpp = bpp;
1653 	pipe_config->port_clock = common_rates[clock];
1654 
1655 	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1656 			      &link_bw, &rate_select);
1657 
1658 	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1659 		      link_bw, rate_select, pipe_config->lane_count,
1660 		      pipe_config->port_clock, bpp);
1661 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1662 		      mode_rate, link_avail);
1663 
1664 	intel_link_compute_m_n(bpp, lane_count,
1665 			       adjusted_mode->crtc_clock,
1666 			       pipe_config->port_clock,
1667 			       &pipe_config->dp_m_n);
1668 
1669 	if (intel_connector->panel.downclock_mode != NULL &&
1670 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1671 			pipe_config->has_drrs = true;
1672 			intel_link_compute_m_n(bpp, lane_count,
1673 				intel_connector->panel.downclock_mode->clock,
1674 				pipe_config->port_clock,
1675 				&pipe_config->dp_m2_n2);
1676 	}
1677 
1678 	if ((IS_SKYLAKE(dev)  || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1679 		skl_edp_set_pll_config(pipe_config);
1680 	else if (IS_BROXTON(dev))
1681 		/* handled in ddi */;
1682 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1683 		hsw_dp_set_ddi_pll_sel(pipe_config);
1684 	else
1685 		intel_dp_set_clock(encoder, pipe_config);
1686 
1687 	return true;
1688 }
1689 
1690 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1691 			      const struct intel_crtc_state *pipe_config)
1692 {
1693 	intel_dp->link_rate = pipe_config->port_clock;
1694 	intel_dp->lane_count = pipe_config->lane_count;
1695 }
1696 
1697 static void intel_dp_prepare(struct intel_encoder *encoder)
1698 {
1699 	struct drm_device *dev = encoder->base.dev;
1700 	struct drm_i915_private *dev_priv = dev->dev_private;
1701 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1702 	enum port port = dp_to_dig_port(intel_dp)->port;
1703 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1704 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1705 
1706 	intel_dp_set_link_params(intel_dp, crtc->config);
1707 
1708 	/*
1709 	 * There are four kinds of DP registers:
1710 	 *
1711 	 * 	IBX PCH
1712 	 * 	SNB CPU
1713 	 *	IVB CPU
1714 	 * 	CPT PCH
1715 	 *
1716 	 * IBX PCH and CPU are the same for almost everything,
1717 	 * except that the CPU DP PLL is configured in this
1718 	 * register
1719 	 *
1720 	 * CPT PCH is quite different, having many bits moved
1721 	 * to the TRANS_DP_CTL register instead. That
1722 	 * configuration happens (oddly) in ironlake_pch_enable
1723 	 */
1724 
1725 	/* Preserve the BIOS-computed detected bit. This is
1726 	 * supposed to be read-only.
1727 	 */
1728 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1729 
1730 	/* Handle DP bits in common between all three register formats */
1731 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1732 	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1733 
1734 	/* Split out the IBX/CPU vs CPT settings */
1735 
1736 	if (IS_GEN7(dev) && port == PORT_A) {
1737 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1738 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1739 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1740 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1741 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1742 
1743 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1744 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1745 
1746 		intel_dp->DP |= crtc->pipe << 29;
1747 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1748 		u32 trans_dp;
1749 
1750 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1751 
1752 		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1753 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1754 			trans_dp |= TRANS_DP_ENH_FRAMING;
1755 		else
1756 			trans_dp &= ~TRANS_DP_ENH_FRAMING;
1757 		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1758 	} else {
1759 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1760 		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
1761 			intel_dp->DP |= DP_COLOR_RANGE_16_235;
1762 
1763 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1764 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1765 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1766 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1767 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
1768 
1769 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1770 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1771 
1772 		if (IS_CHERRYVIEW(dev))
1773 			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1774 		else if (crtc->pipe == PIPE_B)
1775 			intel_dp->DP |= DP_PIPEB_SELECT;
1776 	}
1777 }
1778 
1779 #define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1780 #define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1781 
1782 #define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1783 #define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1784 
1785 #define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1786 #define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1787 
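/*
 * The IDLE_* pairs above are (mask, value) tuples over PP_STATUS bits.
 * A minimal usage sketch (wait_panel_on() below is the real caller):
 *
 *	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 *
 * i.e. poll until (PP_STATUS & mask) == value, giving up after the
 * 5 second timeout in wait_panel_status().
 */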
1788 static void wait_panel_status(struct intel_dp *intel_dp,
1789 				       u32 mask,
1790 				       u32 value)
1791 {
1792 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1793 	struct drm_i915_private *dev_priv = dev->dev_private;
1794 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1795 
1796 	lockdep_assert_held(&dev_priv->pps_mutex);
1797 
1798 	pp_stat_reg = _pp_stat_reg(intel_dp);
1799 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1800 
1801 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1802 			mask, value,
1803 			I915_READ(pp_stat_reg),
1804 			I915_READ(pp_ctrl_reg));
1805 
1806 	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1807 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1808 				I915_READ(pp_stat_reg),
1809 				I915_READ(pp_ctrl_reg));
1810 	}
1811 
1812 	DRM_DEBUG_KMS("Wait complete\n");
1813 }
1814 
1815 static void wait_panel_on(struct intel_dp *intel_dp)
1816 {
1817 	DRM_DEBUG_KMS("Wait for panel power on\n");
1818 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1819 }
1820 
1821 static void wait_panel_off(struct intel_dp *intel_dp)
1822 {
1823 	DRM_DEBUG_KMS("Wait for panel power off time\n");
1824 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1825 }
1826 
1827 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1828 {
1829 	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1830 
1831 	/* When the VDD override bit is the last thing we disable, we have
1832 	 * to do the power cycle wait manually. */
1833 	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1834 				       intel_dp->panel_power_cycle_delay);
1835 
1836 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1837 }
1838 
1839 static void wait_backlight_on(struct intel_dp *intel_dp)
1840 {
1841 	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1842 				       intel_dp->backlight_on_delay);
1843 }
1844 
1845 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1846 {
1847 	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1848 				       intel_dp->backlight_off_delay);
1849 }
1850 
1851 /* Read the current pp_control value, unlocking the register if it
1852  * is locked.
1853  */
1854 
1855 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1856 {
1857 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1858 	struct drm_i915_private *dev_priv = dev->dev_private;
1859 	u32 control;
1860 
1861 	lockdep_assert_held(&dev_priv->pps_mutex);
1862 
1863 	control = I915_READ(_pp_ctrl_reg(intel_dp));
1864 	if (!IS_BROXTON(dev)) {
1865 		control &= ~PANEL_UNLOCK_MASK;
1866 		control |= PANEL_UNLOCK_REGS;
1867 	}
1868 	return control;
1869 }
1870 
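/*
 * Note for callers (an editorial sketch of the contract): the value
 * returned above already carries the PANEL_UNLOCK_REGS write-protect
 * key (except on BXT, which has no such key), so the usual pattern is
 *
 *	pp = ironlake_get_pp_control(intel_dp);
 *	pp |= EDP_FORCE_VDD;
 *	I915_WRITE(pp_ctrl_reg, pp);
 *
 * and the write is accepted without a separate unlock step.
 */
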
1871 /*
1872  * Must be paired with edp_panel_vdd_off().
1873  * Must hold pps_mutex around the whole on/off sequence.
1874  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1875  */
1876 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1877 {
1878 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1879 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1880 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1881 	struct drm_i915_private *dev_priv = dev->dev_private;
1882 	enum intel_display_power_domain power_domain;
1883 	u32 pp;
1884 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1885 	bool need_to_disable = !intel_dp->want_panel_vdd;
1886 
1887 	lockdep_assert_held(&dev_priv->pps_mutex);
1888 
1889 	if (!is_edp(intel_dp))
1890 		return false;
1891 
1892 	cancel_delayed_work(&intel_dp->panel_vdd_work);
1893 	intel_dp->want_panel_vdd = true;
1894 
1895 	if (edp_have_panel_vdd(intel_dp))
1896 		return need_to_disable;
1897 
1898 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1899 	intel_display_power_get(dev_priv, power_domain);
1900 
1901 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1902 		      port_name(intel_dig_port->port));
1903 
1904 	if (!edp_have_panel_power(intel_dp))
1905 		wait_panel_power_cycle(intel_dp);
1906 
1907 	pp = ironlake_get_pp_control(intel_dp);
1908 	pp |= EDP_FORCE_VDD;
1909 
1910 	pp_stat_reg = _pp_stat_reg(intel_dp);
1911 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1912 
1913 	I915_WRITE(pp_ctrl_reg, pp);
1914 	POSTING_READ(pp_ctrl_reg);
1915 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1916 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1917 	/*
1918 	 * If the panel wasn't on, delay before accessing the AUX channel.
1919 	 */
1920 	if (!edp_have_panel_power(intel_dp)) {
1921 		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1922 			      port_name(intel_dig_port->port));
1923 		msleep(intel_dp->panel_power_up_delay);
1924 	}
1925 
1926 	return need_to_disable;
1927 }
1928 
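/*
 * Canonical pairing of the low-level helpers (a sketch; see
 * intel_enable_dp() for an in-tree user):
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	... AUX / panel power work ...
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 *
 * The bool returned by edp_panel_vdd_on() tells the caller whether it
 * took the VDD reference (true) or merely nested on an existing one.
 */
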
1929 /*
1930  * Must be paired with intel_edp_panel_vdd_off() or
1931  * intel_edp_panel_off().
1932  * Nested calls to these functions are not allowed since
1933  * we drop the lock. Caller must use some higher level
1934  * locking to prevent nested calls from other threads.
1935  */
1936 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1937 {
1938 	bool vdd;
1939 
1940 	if (!is_edp(intel_dp))
1941 		return;
1942 
1943 	pps_lock(intel_dp);
1944 	vdd = edp_panel_vdd_on(intel_dp);
1945 	pps_unlock(intel_dp);
1946 
1947 #ifdef __DragonFly__
1948 /* XXX: limit dmesg spam to 16 warnings instead of 137, where is the bug? */
1949 	if (!vdd)
1950 		DRM_ERROR_RATELIMITED("eDP port %c VDD already requested on\n",
1951 		    port_name(dp_to_dig_port(intel_dp)->port));
1952 #else
1953 	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1954 	     port_name(dp_to_dig_port(intel_dp)->port));
1955 #endif
1956 }
1957 
1958 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1959 {
1960 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1961 	struct drm_i915_private *dev_priv = dev->dev_private;
1962 	struct intel_digital_port *intel_dig_port =
1963 		dp_to_dig_port(intel_dp);
1964 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1965 	enum intel_display_power_domain power_domain;
1966 	u32 pp;
1967 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1968 
1969 	lockdep_assert_held(&dev_priv->pps_mutex);
1970 
1971 	WARN_ON(intel_dp->want_panel_vdd);
1972 
1973 	if (!edp_have_panel_vdd(intel_dp))
1974 		return;
1975 
1976 	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1977 		      port_name(intel_dig_port->port));
1978 
1979 	pp = ironlake_get_pp_control(intel_dp);
1980 	pp &= ~EDP_FORCE_VDD;
1981 
1982 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1983 	pp_stat_reg = _pp_stat_reg(intel_dp);
1984 
1985 	I915_WRITE(pp_ctrl_reg, pp);
1986 	POSTING_READ(pp_ctrl_reg);
1987 
1988 	/* Make sure sequencer is idle before allowing subsequent activity */
1989 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1990 		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1991 
1992 	if ((pp & POWER_TARGET_ON) == 0)
1993 		intel_dp->last_power_cycle = jiffies;
1994 
1995 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1996 	intel_display_power_put(dev_priv, power_domain);
1997 }
1998 
1999 static void edp_panel_vdd_work(struct work_struct *__work)
2000 {
2001 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
2002 						 struct intel_dp, panel_vdd_work);
2003 
2004 	pps_lock(intel_dp);
2005 	if (!intel_dp->want_panel_vdd)
2006 		edp_panel_vdd_off_sync(intel_dp);
2007 	pps_unlock(intel_dp);
2008 }
2009 
2010 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2011 {
2012 	unsigned long delay;
2013 
2014 	/*
2015 	 * Queue the timer to fire a long time from now (relative to the power
2016 	 * down delay) to keep the panel power up across a sequence of
2017 	 * operations.
2018 	 */
2019 	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2020 	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2021 }
2022 
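/*
 * Worked example of the 5x factor above (numbers are illustrative):
 * with a panel_power_cycle_delay of 500 ms the VDD off work runs
 * ~2.5 s later, so a burst of AUX transactions keeps re-arming the
 * timer instead of power cycling the panel every few milliseconds.
 */
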
2023 /*
2024  * Must be paired with edp_panel_vdd_on().
2025  * Must hold pps_mutex around the whole on/off sequence.
2026  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2027  */
2028 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2029 {
2030 	struct drm_i915_private *dev_priv =
2031 		intel_dp_to_dev(intel_dp)->dev_private;
2032 
2033 	lockdep_assert_held(&dev_priv->pps_mutex);
2034 
2035 	if (!is_edp(intel_dp))
2036 		return;
2037 
2038 	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2039 	     port_name(dp_to_dig_port(intel_dp)->port));
2040 
2041 	intel_dp->want_panel_vdd = false;
2042 
2043 	if (sync)
2044 		edp_panel_vdd_off_sync(intel_dp);
2045 	else
2046 		edp_panel_vdd_schedule_off(intel_dp);
2047 }
2048 
2049 static void edp_panel_on(struct intel_dp *intel_dp)
2050 {
2051 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2052 	struct drm_i915_private *dev_priv = dev->dev_private;
2053 	u32 pp;
2054 	i915_reg_t pp_ctrl_reg;
2055 
2056 	lockdep_assert_held(&dev_priv->pps_mutex);
2057 
2058 	if (!is_edp(intel_dp))
2059 		return;
2060 
2061 	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2062 		      port_name(dp_to_dig_port(intel_dp)->port));
2063 
2064 	if (WARN(edp_have_panel_power(intel_dp),
2065 		 "eDP port %c panel power already on\n",
2066 		 port_name(dp_to_dig_port(intel_dp)->port)))
2067 		return;
2068 
2069 	wait_panel_power_cycle(intel_dp);
2070 
2071 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2072 	pp = ironlake_get_pp_control(intel_dp);
2073 	if (IS_GEN5(dev)) {
2074 		/* ILK workaround: disable reset around power sequence */
2075 		pp &= ~PANEL_POWER_RESET;
2076 		I915_WRITE(pp_ctrl_reg, pp);
2077 		POSTING_READ(pp_ctrl_reg);
2078 	}
2079 
2080 	pp |= POWER_TARGET_ON;
2081 	if (!IS_GEN5(dev))
2082 		pp |= PANEL_POWER_RESET;
2083 
2084 	I915_WRITE(pp_ctrl_reg, pp);
2085 	POSTING_READ(pp_ctrl_reg);
2086 
2087 	wait_panel_on(intel_dp);
2088 	intel_dp->last_power_on = jiffies;
2089 
2090 	if (IS_GEN5(dev)) {
2091 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2092 		I915_WRITE(pp_ctrl_reg, pp);
2093 		POSTING_READ(pp_ctrl_reg);
2094 	}
2095 }
2096 
2097 void intel_edp_panel_on(struct intel_dp *intel_dp)
2098 {
2099 	if (!is_edp(intel_dp))
2100 		return;
2101 
2102 	pps_lock(intel_dp);
2103 	edp_panel_on(intel_dp);
2104 	pps_unlock(intel_dp);
2105 }
2106 
2107 
2108 static void edp_panel_off(struct intel_dp *intel_dp)
2109 {
2110 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2111 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
2112 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2113 	struct drm_i915_private *dev_priv = dev->dev_private;
2114 	enum intel_display_power_domain power_domain;
2115 	u32 pp;
2116 	i915_reg_t pp_ctrl_reg;
2117 
2118 	lockdep_assert_held(&dev_priv->pps_mutex);
2119 
2120 	if (!is_edp(intel_dp))
2121 		return;
2122 
2123 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2124 		      port_name(dp_to_dig_port(intel_dp)->port));
2125 
2126 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2127 	     port_name(dp_to_dig_port(intel_dp)->port));
2128 
2129 	pp = ironlake_get_pp_control(intel_dp);
2130 	/* We need to switch off panel power _and_ force vdd; otherwise some
2131 	 * panels get very unhappy and cease to work. */
2132 	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2133 		EDP_BLC_ENABLE);
2134 
2135 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2136 
2137 	intel_dp->want_panel_vdd = false;
2138 
2139 	I915_WRITE(pp_ctrl_reg, pp);
2140 	POSTING_READ(pp_ctrl_reg);
2141 
2142 	intel_dp->last_power_cycle = jiffies;
2143 	wait_panel_off(intel_dp);
2144 
2145 	/* We got a reference when we enabled the VDD. */
2146 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
2147 	intel_display_power_put(dev_priv, power_domain);
2148 }
2149 
2150 void intel_edp_panel_off(struct intel_dp *intel_dp)
2151 {
2152 	if (!is_edp(intel_dp))
2153 		return;
2154 
2155 	pps_lock(intel_dp);
2156 	edp_panel_off(intel_dp);
2157 	pps_unlock(intel_dp);
2158 }
2159 
2160 /* Enable backlight in the panel power control. */
2161 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2162 {
2163 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2164 	struct drm_device *dev = intel_dig_port->base.base.dev;
2165 	struct drm_i915_private *dev_priv = dev->dev_private;
2166 	u32 pp;
2167 	i915_reg_t pp_ctrl_reg;
2168 
2169 	/*
2170 	 * If we enable the backlight right away following a panel power
2171 	 * on, we may see slight flicker as the panel syncs with the eDP
2172 	 * link.  So delay a bit to make sure the image is solid before
2173 	 * allowing it to appear.
2174 	 */
2175 	wait_backlight_on(intel_dp);
2176 
2177 	pps_lock(intel_dp);
2178 
2179 	pp = ironlake_get_pp_control(intel_dp);
2180 	pp |= EDP_BLC_ENABLE;
2181 
2182 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2183 
2184 	I915_WRITE(pp_ctrl_reg, pp);
2185 	POSTING_READ(pp_ctrl_reg);
2186 
2187 	pps_unlock(intel_dp);
2188 }
2189 
2190 /* Enable backlight PWM and backlight PP control. */
2191 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2192 {
2193 	if (!is_edp(intel_dp))
2194 		return;
2195 
2196 	DRM_DEBUG_KMS("\n");
2197 
2198 	intel_panel_enable_backlight(intel_dp->attached_connector);
2199 	_intel_edp_backlight_on(intel_dp);
2200 }
2201 
2202 /* Disable backlight in the panel power control. */
2203 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2204 {
2205 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2206 	struct drm_i915_private *dev_priv = dev->dev_private;
2207 	u32 pp;
2208 	i915_reg_t pp_ctrl_reg;
2209 
2210 	if (!is_edp(intel_dp))
2211 		return;
2212 
2213 	pps_lock(intel_dp);
2214 
2215 	pp = ironlake_get_pp_control(intel_dp);
2216 	pp &= ~EDP_BLC_ENABLE;
2217 
2218 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2219 
2220 	I915_WRITE(pp_ctrl_reg, pp);
2221 	POSTING_READ(pp_ctrl_reg);
2222 
2223 	pps_unlock(intel_dp);
2224 
2225 	intel_dp->last_backlight_off = jiffies;
2226 	edp_wait_backlight_off(intel_dp);
2227 }
2228 
2229 /* Disable backlight PP control and backlight PWM. */
2230 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2231 {
2232 	if (!is_edp(intel_dp))
2233 		return;
2234 
2235 	DRM_DEBUG_KMS("\n");
2236 
2237 	_intel_edp_backlight_off(intel_dp);
2238 	intel_panel_disable_backlight(intel_dp->attached_connector);
2239 }
2240 
2241 /*
2242  * Hook for controlling the panel power control backlight through the bl_power
2243  * sysfs attribute. Take care to handle multiple calls.
2244  */
2245 static void intel_edp_backlight_power(struct intel_connector *connector,
2246 				      bool enable)
2247 {
2248 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2249 	bool is_enabled;
2250 
2251 	pps_lock(intel_dp);
2252 	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2253 	pps_unlock(intel_dp);
2254 
2255 	if (is_enabled == enable)
2256 		return;
2257 
2258 	DRM_DEBUG_KMS("panel power control backlight %s\n",
2259 		      enable ? "enable" : "disable");
2260 
2261 	if (enable)
2262 		_intel_edp_backlight_on(intel_dp);
2263 	else
2264 		_intel_edp_backlight_off(intel_dp);
2265 }
2266 
2267 static const char *state_string(bool enabled)
2268 {
2269 	return enabled ? "on" : "off";
2270 }
2271 
2272 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2273 {
2274 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2275 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2276 	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2277 
2278 	I915_STATE_WARN(cur_state != state,
2279 			"DP port %c state assertion failure (expected %s, current %s)\n",
2280 			port_name(dig_port->port),
2281 			state_string(state), state_string(cur_state));
2282 }
2283 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2284 
2285 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2286 {
2287 	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2288 
2289 	I915_STATE_WARN(cur_state != state,
2290 			"eDP PLL state assertion failure (expected %s, current %s)\n",
2291 			state_string(state), state_string(cur_state));
2292 }
2293 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2294 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2295 
2296 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2297 {
2298 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2299 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2300 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2301 
2302 	assert_pipe_disabled(dev_priv, crtc->pipe);
2303 	assert_dp_port_disabled(intel_dp);
2304 	assert_edp_pll_disabled(dev_priv);
2305 
2306 	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2307 		      crtc->config->port_clock);
2308 
2309 	intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2310 
2311 	if (crtc->config->port_clock == 162000)
2312 		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2313 	else
2314 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2315 
2316 	I915_WRITE(DP_A, intel_dp->DP);
2317 	POSTING_READ(DP_A);
2318 	udelay(500);
2319 
2320 	intel_dp->DP |= DP_PLL_ENABLE;
2321 
2322 	I915_WRITE(DP_A, intel_dp->DP);
2323 	POSTING_READ(DP_A);
2324 	udelay(200);
2325 }
2326 
2327 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2328 {
2329 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2330 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2331 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2332 
2333 	assert_pipe_disabled(dev_priv, crtc->pipe);
2334 	assert_dp_port_disabled(intel_dp);
2335 	assert_edp_pll_enabled(dev_priv);
2336 
2337 	DRM_DEBUG_KMS("disabling eDP PLL\n");
2338 
2339 	intel_dp->DP &= ~DP_PLL_ENABLE;
2340 
2341 	I915_WRITE(DP_A, intel_dp->DP);
2342 	POSTING_READ(DP_A);
2343 	udelay(200);
2344 }
2345 
2346 /* If the sink supports it, try to set the power state appropriately */
2347 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2348 {
2349 	int ret, i;
2350 
2351 	/* Should have a valid DPCD by this point */
2352 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2353 		return;
2354 
2355 	if (mode != DRM_MODE_DPMS_ON) {
2356 		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2357 					 DP_SET_POWER_D3);
2358 	} else {
2359 		/*
2360 		 * When turning on, retry a few times with a 1 ms pause
2361 		 * between attempts to give the sink time to wake up.
2362 		 */
2363 		for (i = 0; i < 3; i++) {
2364 			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2365 						 DP_SET_POWER_D0);
2366 			if (ret == 1)
2367 				break;
2368 			msleep(1);
2369 		}
2370 	}
2371 
2372 	if (ret != 1)
2373 		DRM_DEBUG_KMS("failed to %s sink power state\n",
2374 			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2375 }
2376 
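/*
 * For reference, the power state handshake above is a single DPCD
 * byte; an equivalent raw write (a sketch using the same helper
 * family) would be:
 *
 *	u8 val = DP_SET_POWER_D0;
 *	drm_dp_dpcd_write(&intel_dp->aux, DP_SET_POWER, &val, 1);
 *
 * drm_dp_dpcd_writeb() above is just the one-byte convenience wrapper.
 */
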
2377 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2378 				  enum i915_pipe *pipe)
2379 {
2380 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2381 	enum port port = dp_to_dig_port(intel_dp)->port;
2382 	struct drm_device *dev = encoder->base.dev;
2383 	struct drm_i915_private *dev_priv = dev->dev_private;
2384 	enum intel_display_power_domain power_domain;
2385 	u32 tmp;
2386 	bool ret;
2387 
2388 	power_domain = intel_display_port_power_domain(encoder);
2389 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2390 		return false;
2391 
2392 	ret = false;
2393 
2394 	tmp = I915_READ(intel_dp->output_reg);
2395 
2396 	if (!(tmp & DP_PORT_EN))
2397 		goto out;
2398 
2399 	if (IS_GEN7(dev) && port == PORT_A) {
2400 		*pipe = PORT_TO_PIPE_CPT(tmp);
2401 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2402 		enum i915_pipe p;
2403 
2404 		for_each_pipe(dev_priv, p) {
2405 			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2406 			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2407 				*pipe = p;
2408 				ret = true;
2409 
2410 				goto out;
2411 			}
2412 		}
2413 
2414 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2415 			      i915_mmio_reg_offset(intel_dp->output_reg));
2416 	} else if (IS_CHERRYVIEW(dev)) {
2417 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2418 	} else {
2419 		*pipe = PORT_TO_PIPE(tmp);
2420 	}
2421 
2422 	ret = true;
2423 
2424 out:
2425 	intel_display_power_put(dev_priv, power_domain);
2426 
2427 	return ret;
2428 }
2429 
2430 static void intel_dp_get_config(struct intel_encoder *encoder,
2431 				struct intel_crtc_state *pipe_config)
2432 {
2433 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2434 	u32 tmp, flags = 0;
2435 	struct drm_device *dev = encoder->base.dev;
2436 	struct drm_i915_private *dev_priv = dev->dev_private;
2437 	enum port port = dp_to_dig_port(intel_dp)->port;
2438 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2439 	int dotclock;
2440 
2441 	tmp = I915_READ(intel_dp->output_reg);
2442 
2443 	pipe_config->has_audio = (tmp & DP_AUDIO_OUTPUT_ENABLE) && port != PORT_A;
2444 
2445 	if (HAS_PCH_CPT(dev) && port != PORT_A) {
2446 		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2447 
2448 		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2449 			flags |= DRM_MODE_FLAG_PHSYNC;
2450 		else
2451 			flags |= DRM_MODE_FLAG_NHSYNC;
2452 
2453 		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2454 			flags |= DRM_MODE_FLAG_PVSYNC;
2455 		else
2456 			flags |= DRM_MODE_FLAG_NVSYNC;
2457 	} else {
2458 		if (tmp & DP_SYNC_HS_HIGH)
2459 			flags |= DRM_MODE_FLAG_PHSYNC;
2460 		else
2461 			flags |= DRM_MODE_FLAG_NHSYNC;
2462 
2463 		if (tmp & DP_SYNC_VS_HIGH)
2464 			flags |= DRM_MODE_FLAG_PVSYNC;
2465 		else
2466 			flags |= DRM_MODE_FLAG_NVSYNC;
2467 	}
2468 
2469 	pipe_config->base.adjusted_mode.flags |= flags;
2470 
2471 	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2472 	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
2473 		pipe_config->limited_color_range = true;
2474 
2475 	pipe_config->has_dp_encoder = true;
2476 
2477 	pipe_config->lane_count =
2478 		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2479 
2480 	intel_dp_get_m_n(crtc, pipe_config);
2481 
2482 	if (port == PORT_A) {
2483 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2484 			pipe_config->port_clock = 162000;
2485 		else
2486 			pipe_config->port_clock = 270000;
2487 	}
2488 
2489 	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2490 					    &pipe_config->dp_m_n);
2491 
2492 	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2493 		ironlake_check_encoder_dotclock(pipe_config, dotclock);
2494 
2495 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2496 
2497 	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2498 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2499 		/*
2500 		 * This is a big fat ugly hack.
2501 		 *
2502 		 * Some machines in UEFI boot mode provide us a VBT that has 18
2503 		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2504 		 * unknown we fail to light up. Yet the same BIOS boots up with
2505 		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2506 		 * max, not what it tells us to use.
2507 		 *
2508 		 * Note: This will still be broken if the eDP panel is not lit
2509 		 * up by the BIOS, and thus we can't get the mode at module
2510 		 * load.
2511 		 */
2512 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2513 			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2514 		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2515 	}
2516 }
2517 
2518 static void intel_disable_dp(struct intel_encoder *encoder)
2519 {
2520 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2521 	struct drm_device *dev = encoder->base.dev;
2522 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2523 
2524 	if (crtc->config->has_audio)
2525 		intel_audio_codec_disable(encoder);
2526 
2527 	if (HAS_PSR(dev) && !HAS_DDI(dev))
2528 		intel_psr_disable(intel_dp);
2529 
2530 	/* Make sure the panel is off before trying to change the mode. But also
2531 	 * ensure that we have vdd while we switch off the panel. */
2532 	intel_edp_panel_vdd_on(intel_dp);
2533 	intel_edp_backlight_off(intel_dp);
2534 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2535 	intel_edp_panel_off(intel_dp);
2536 
2537 	/* disable the port before the pipe on g4x */
2538 	if (INTEL_INFO(dev)->gen < 5)
2539 		intel_dp_link_down(intel_dp);
2540 }
2541 
2542 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2543 {
2544 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2545 	enum port port = dp_to_dig_port(intel_dp)->port;
2546 
2547 	intel_dp_link_down(intel_dp);
2548 
2549 	/* Only ilk+ has port A */
2550 	if (port == PORT_A)
2551 		ironlake_edp_pll_off(intel_dp);
2552 }
2553 
2554 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2555 {
2556 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2557 
2558 	intel_dp_link_down(intel_dp);
2559 }
2560 
2561 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2562 				     bool reset)
2563 {
2564 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2565 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2566 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2567 	enum i915_pipe pipe = crtc->pipe;
2568 	uint32_t val;
2569 
2570 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2571 	if (reset)
2572 		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2573 	else
2574 		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2575 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2576 
2577 	if (crtc->config->lane_count > 2) {
2578 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2579 		if (reset)
2580 			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2581 		else
2582 			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2583 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2584 	}
2585 
2586 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2587 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2588 	if (reset)
2589 		val &= ~DPIO_PCS_CLK_SOFT_RESET;
2590 	else
2591 		val |= DPIO_PCS_CLK_SOFT_RESET;
2592 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2593 
2594 	if (crtc->config->lane_count > 2) {
2595 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2596 		val |= CHV_PCS_REQ_SOFTRESET_EN;
2597 		if (reset)
2598 			val &= ~DPIO_PCS_CLK_SOFT_RESET;
2599 		else
2600 			val |= DPIO_PCS_CLK_SOFT_RESET;
2601 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2602 	}
2603 }
2604 
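/*
 * Pairing sketch for the helper above: chv_dp_pre_pll_enable() asserts
 * the lane reset (reset=true) before the PLL is touched,
 * chv_pre_enable_dp() deasserts it (reset=false) once the lanes are
 * programmed, and chv_post_disable_dp() asserts it again on teardown.
 */
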
2605 static void chv_post_disable_dp(struct intel_encoder *encoder)
2606 {
2607 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2608 	struct drm_device *dev = encoder->base.dev;
2609 	struct drm_i915_private *dev_priv = dev->dev_private;
2610 
2611 	intel_dp_link_down(intel_dp);
2612 
2613 	mutex_lock(&dev_priv->sb_lock);
2614 
2615 	/* Assert data lane reset */
2616 	chv_data_lane_soft_reset(encoder, true);
2617 
2618 	mutex_unlock(&dev_priv->sb_lock);
2619 }
2620 
2621 static void
2622 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2623 			 uint32_t *DP,
2624 			 uint8_t dp_train_pat)
2625 {
2626 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2627 	struct drm_device *dev = intel_dig_port->base.base.dev;
2628 	struct drm_i915_private *dev_priv = dev->dev_private;
2629 	enum port port = intel_dig_port->port;
2630 
2631 	if (HAS_DDI(dev)) {
2632 		uint32_t temp = I915_READ(DP_TP_CTL(port));
2633 
2634 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2635 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2636 		else
2637 			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2638 
2639 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2640 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2641 		case DP_TRAINING_PATTERN_DISABLE:
2642 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2643 
2644 			break;
2645 		case DP_TRAINING_PATTERN_1:
2646 			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2647 			break;
2648 		case DP_TRAINING_PATTERN_2:
2649 			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2650 			break;
2651 		case DP_TRAINING_PATTERN_3:
2652 			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2653 			break;
2654 		}
2655 		I915_WRITE(DP_TP_CTL(port), temp);
2656 
2657 	} else if ((IS_GEN7(dev) && port == PORT_A) ||
2658 		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
2659 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2660 
2661 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2662 		case DP_TRAINING_PATTERN_DISABLE:
2663 			*DP |= DP_LINK_TRAIN_OFF_CPT;
2664 			break;
2665 		case DP_TRAINING_PATTERN_1:
2666 			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2667 			break;
2668 		case DP_TRAINING_PATTERN_2:
2669 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2670 			break;
2671 		case DP_TRAINING_PATTERN_3:
2672 			DRM_ERROR("DP training pattern 3 not supported\n");
2673 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2674 			break;
2675 		}
2676 
2677 	} else {
2678 		if (IS_CHERRYVIEW(dev))
2679 			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
2680 		else
2681 			*DP &= ~DP_LINK_TRAIN_MASK;
2682 
2683 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2684 		case DP_TRAINING_PATTERN_DISABLE:
2685 			*DP |= DP_LINK_TRAIN_OFF;
2686 			break;
2687 		case DP_TRAINING_PATTERN_1:
2688 			*DP |= DP_LINK_TRAIN_PAT_1;
2689 			break;
2690 		case DP_TRAINING_PATTERN_2:
2691 			*DP |= DP_LINK_TRAIN_PAT_2;
2692 			break;
2693 		case DP_TRAINING_PATTERN_3:
2694 			if (IS_CHERRYVIEW(dev)) {
2695 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
2696 			} else {
2697 				DRM_ERROR("DP training pattern 3 not supported\n");
2698 				*DP |= DP_LINK_TRAIN_PAT_2;
2699 			}
2700 			break;
2701 		}
2702 	}
2703 }
2704 
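/*
 * Example of the fallback behaviour above (an illustration, not new
 * logic): asking a CPT PCH port for DP_TRAINING_PATTERN_3 logs an
 * error and trains with DP_LINK_TRAIN_PAT_2_CPT instead, since only
 * CHV (and DDI) hardware can generate pattern 3.
 */
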
2705 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2706 {
2707 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2708 	struct drm_i915_private *dev_priv = dev->dev_private;
2709 	struct intel_crtc *crtc =
2710 		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2711 
2712 	/* enable with pattern 1 (as per spec) */
2713 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2714 				 DP_TRAINING_PATTERN_1);
2715 
2716 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2717 	POSTING_READ(intel_dp->output_reg);
2718 
2719 	/*
2720 	 * Magic for VLV/CHV. We _must_ first set up the register
2721 	 * without actually enabling the port, and then do another
2722 	 * write to enable the port. Otherwise link training will
2723 	 * fail when the power sequencer is freshly used for this port.
2724 	 */
2725 	intel_dp->DP |= DP_PORT_EN;
2726 	if (crtc->config->has_audio)
2727 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2728 
2729 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2730 	POSTING_READ(intel_dp->output_reg);
2731 }
2732 
2733 static void intel_enable_dp(struct intel_encoder *encoder)
2734 {
2735 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2736 	struct drm_device *dev = encoder->base.dev;
2737 	struct drm_i915_private *dev_priv = dev->dev_private;
2738 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2739 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2740 	enum port port = dp_to_dig_port(intel_dp)->port;
2741 	enum i915_pipe pipe = crtc->pipe;
2742 
2743 	if (WARN_ON(dp_reg & DP_PORT_EN))
2744 		return;
2745 
2746 	pps_lock(intel_dp);
2747 
2748 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
2749 		vlv_init_panel_power_sequencer(intel_dp);
2750 
2751 	/*
2752 	 * We get an occasional spurious underrun between the port
2753 	 * enable and vdd enable, when enabling port A eDP.
2754 	 *
2755 	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2756 	 */
2757 	if (port == PORT_A)
2758 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2759 
2760 	intel_dp_enable_port(intel_dp);
2761 
2762 	if (port == PORT_A && IS_GEN5(dev_priv)) {
2763 		/*
2764 		 * Underrun reporting for the other pipe was disabled in
2765 		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2766 		 * enabled, so it's now safe to re-enable underrun reporting.
2767 		 */
2768 		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2769 		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2770 		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2771 	}
2772 
2773 	edp_panel_vdd_on(intel_dp);
2774 	edp_panel_on(intel_dp);
2775 	edp_panel_vdd_off(intel_dp, true);
2776 
2777 	if (port == PORT_A)
2778 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2779 
2780 	pps_unlock(intel_dp);
2781 
2782 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
2783 		unsigned int lane_mask = 0x0;
2784 
2785 		if (IS_CHERRYVIEW(dev))
2786 			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2787 
2788 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2789 				    lane_mask);
2790 	}
2791 
2792 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2793 	intel_dp_start_link_train(intel_dp);
2794 	intel_dp_stop_link_train(intel_dp);
2795 
2796 	if (crtc->config->has_audio) {
2797 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2798 				 pipe_name(pipe));
2799 		intel_audio_codec_enable(encoder);
2800 	}
2801 }
2802 
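/*
 * Enable ordering recap (a condensed sketch of the function above):
 * port write with training pattern 1 -> eDP VDD/panel power under
 * pps_mutex -> PHY ready wait on VLV/CHV -> sink to D0 -> link
 * training -> audio codec.
 */
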
2803 static void g4x_enable_dp(struct intel_encoder *encoder)
2804 {
2805 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2806 
2807 	intel_enable_dp(encoder);
2808 	intel_edp_backlight_on(intel_dp);
2809 }
2810 
2811 static void vlv_enable_dp(struct intel_encoder *encoder)
2812 {
2813 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2814 
2815 	intel_edp_backlight_on(intel_dp);
2816 	intel_psr_enable(intel_dp);
2817 }
2818 
2819 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2820 {
2821 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2822 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2823 	enum port port = dp_to_dig_port(intel_dp)->port;
2824 	enum i915_pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2825 
2826 	intel_dp_prepare(encoder);
2827 
2828 	if (port == PORT_A && IS_GEN5(dev_priv)) {
2829 		/*
2830 		 * We get FIFO underruns on the other pipe when
2831 		 * enabling the CPU eDP PLL, and when enabling CPU
2832 		 * eDP port. We could potentially avoid the PLL
2833 		 * underrun with a vblank wait just prior to enabling
2834 		 * the PLL, but that doesn't appear to help the port
2835 		 * enable case. Just sweep it all under the rug.
2836 		 */
2837 		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2838 		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2839 	}
2840 
2841 	/* Only ilk+ has port A */
2842 	if (port == PORT_A)
2843 		ironlake_edp_pll_on(intel_dp);
2844 }
2845 
2846 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2847 {
2848 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2849 	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2850 	enum i915_pipe pipe = intel_dp->pps_pipe;
2851 	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2852 
2853 	edp_panel_vdd_off_sync(intel_dp);
2854 
2855 	/*
2856 	 * VLV seems to get confused when multiple power sequencers
2857 	 * have the same port selected (even if only one has power/vdd
2858 	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2859 	 * CHV, on the other hand, doesn't seem to mind having the same port
2860 	 * selected in multiple power sequencers, but let's always clear the
2861 	 * port select when logically disconnecting a power sequencer
2862 	 * from a port.
2863 	 */
2864 	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2865 		      pipe_name(pipe), port_name(intel_dig_port->port));
2866 	I915_WRITE(pp_on_reg, 0);
2867 	POSTING_READ(pp_on_reg);
2868 
2869 	intel_dp->pps_pipe = INVALID_PIPE;
2870 }
2871 
2872 static void vlv_steal_power_sequencer(struct drm_device *dev,
2873 				      enum i915_pipe pipe)
2874 {
2875 	struct drm_i915_private *dev_priv = dev->dev_private;
2876 	struct intel_encoder *encoder;
2877 
2878 	lockdep_assert_held(&dev_priv->pps_mutex);
2879 
2880 	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2881 		return;
2882 
2883 	for_each_intel_encoder(dev, encoder) {
2884 		struct intel_dp *intel_dp;
2885 		enum port port;
2886 
2887 		if (encoder->type != INTEL_OUTPUT_EDP)
2888 			continue;
2889 
2890 		intel_dp = enc_to_intel_dp(&encoder->base);
2891 		port = dp_to_dig_port(intel_dp)->port;
2892 
2893 		if (intel_dp->pps_pipe != pipe)
2894 			continue;
2895 
2896 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2897 			      pipe_name(pipe), port_name(port));
2898 
2899 		WARN(encoder->base.crtc,
2900 		     "stealing pipe %c power sequencer from active eDP port %c\n",
2901 		     pipe_name(pipe), port_name(port));
2902 
2903 		/* make sure vdd is off before we steal it */
2904 		vlv_detach_power_sequencer(intel_dp);
2905 	}
2906 }
2907 
2908 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2909 {
2910 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2911 	struct intel_encoder *encoder = &intel_dig_port->base;
2912 	struct drm_device *dev = encoder->base.dev;
2913 	struct drm_i915_private *dev_priv = dev->dev_private;
2914 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2915 
2916 	lockdep_assert_held(&dev_priv->pps_mutex);
2917 
2918 	if (!is_edp(intel_dp))
2919 		return;
2920 
2921 	if (intel_dp->pps_pipe == crtc->pipe)
2922 		return;
2923 
2924 	/*
2925 	 * If another power sequencer was being used on this
2926 	 * port previously make sure to turn off vdd there while
2927 	 * we still have control of it.
2928 	 */
2929 	if (intel_dp->pps_pipe != INVALID_PIPE)
2930 		vlv_detach_power_sequencer(intel_dp);
2931 
2932 	/*
2933 	 * We may be stealing the power
2934 	 * sequencer from another port.
2935 	 */
2936 	vlv_steal_power_sequencer(dev, crtc->pipe);
2937 
2938 	/* now it's all ours */
2939 	intel_dp->pps_pipe = crtc->pipe;
2940 
2941 	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2942 		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2943 
2944 	/* init power sequencer on this pipe and port */
2945 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2946 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2947 }
2948 
2949 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2950 {
2951 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2952 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2953 	struct drm_device *dev = encoder->base.dev;
2954 	struct drm_i915_private *dev_priv = dev->dev_private;
2955 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2956 	enum dpio_channel port = vlv_dport_to_channel(dport);
2957 	int pipe = intel_crtc->pipe;
2958 	u32 val;
2959 
2960 	mutex_lock(&dev_priv->sb_lock);
2961 
2962 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2963 	val = 0;	/* discard the read; val is rebuilt from scratch below */
2964 	if (pipe)
2965 		val |= (1<<21);
2966 	else
2967 		val &= ~(1<<21);
2968 	val |= 0x001000c4;
2969 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2970 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2971 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2972 
2973 	mutex_unlock(&dev_priv->sb_lock);
2974 
2975 	intel_enable_dp(encoder);
2976 }
2977 
2978 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2979 {
2980 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2981 	struct drm_device *dev = encoder->base.dev;
2982 	struct drm_i915_private *dev_priv = dev->dev_private;
2983 	struct intel_crtc *intel_crtc =
2984 		to_intel_crtc(encoder->base.crtc);
2985 	enum dpio_channel port = vlv_dport_to_channel(dport);
2986 	int pipe = intel_crtc->pipe;
2987 
2988 	intel_dp_prepare(encoder);
2989 
2990 	/* Program Tx lane resets to default */
2991 	mutex_lock(&dev_priv->sb_lock);
2992 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2993 			 DPIO_PCS_TX_LANE2_RESET |
2994 			 DPIO_PCS_TX_LANE1_RESET);
2995 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2996 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2997 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2998 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2999 				 DPIO_PCS_CLK_SOFT_RESET);
3000 
3001 	/* Fix up inter-pair skew failure */
3002 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
3003 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
3004 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
3005 	mutex_unlock(&dev_priv->sb_lock);
3006 }
3007 
3008 static void chv_pre_enable_dp(struct intel_encoder *encoder)
3009 {
3010 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3011 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3012 	struct drm_device *dev = encoder->base.dev;
3013 	struct drm_i915_private *dev_priv = dev->dev_private;
3014 	struct intel_crtc *intel_crtc =
3015 		to_intel_crtc(encoder->base.crtc);
3016 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3017 	int pipe = intel_crtc->pipe;
3018 	int data, i, stagger;
3019 	u32 val;
3020 
3021 	mutex_lock(&dev_priv->sb_lock);
3022 
3023 	/* allow hardware to manage TX FIFO reset source */
3024 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3025 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3026 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3027 
3028 	if (intel_crtc->config->lane_count > 2) {
3029 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3030 		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3031 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3032 	}
3033 
3034 	/* Program Tx lane latency optimal setting */
3035 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3036 		/* Set the upar bit */
3037 		if (intel_crtc->config->lane_count == 1)
3038 			data = 0x0;
3039 		else
3040 			data = (i == 1) ? 0x0 : 0x1;
3041 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3042 				data << DPIO_UPAR_SHIFT);
3043 	}
3044 
3045 	/* Data lane stagger programming */
3046 	if (intel_crtc->config->port_clock > 270000)
3047 		stagger = 0x18;
3048 	else if (intel_crtc->config->port_clock > 135000)
3049 		stagger = 0xd;
3050 	else if (intel_crtc->config->port_clock > 67500)
3051 		stagger = 0x7;
3052 	else if (intel_crtc->config->port_clock > 33750)
3053 		stagger = 0x4;
3054 	else
3055 		stagger = 0x2;
3056 
3057 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3058 	val |= DPIO_TX2_STAGGER_MASK(0x1f);
3059 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3060 
3061 	if (intel_crtc->config->lane_count > 2) {
3062 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3063 		val |= DPIO_TX2_STAGGER_MASK(0x1f);
3064 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3065 	}
3066 
3067 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3068 		       DPIO_LANESTAGGER_STRAP(stagger) |
3069 		       DPIO_LANESTAGGER_STRAP_OVRD |
3070 		       DPIO_TX1_STAGGER_MASK(0x1f) |
3071 		       DPIO_TX1_STAGGER_MULT(6) |
3072 		       DPIO_TX2_STAGGER_MULT(0));
3073 
3074 	if (intel_crtc->config->lane_count > 2) {
3075 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3076 			       DPIO_LANESTAGGER_STRAP(stagger) |
3077 			       DPIO_LANESTAGGER_STRAP_OVRD |
3078 			       DPIO_TX1_STAGGER_MASK(0x1f) |
3079 			       DPIO_TX1_STAGGER_MULT(7) |
3080 			       DPIO_TX2_STAGGER_MULT(5));
3081 	}
3082 
3083 	/* Deassert data lane reset */
3084 	chv_data_lane_soft_reset(encoder, false);
3085 
3086 	mutex_unlock(&dev_priv->sb_lock);
3087 
3088 	intel_enable_dp(encoder);
3089 
3090 	/* Second common lane will stay alive on its own now */
3091 	if (dport->release_cl2_override) {
3092 		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3093 		dport->release_cl2_override = false;
3094 	}
3095 }
3096 
3097 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3098 {
3099 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3100 	struct drm_device *dev = encoder->base.dev;
3101 	struct drm_i915_private *dev_priv = dev->dev_private;
3102 	struct intel_crtc *intel_crtc =
3103 		to_intel_crtc(encoder->base.crtc);
3104 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3105 	enum i915_pipe pipe = intel_crtc->pipe;
3106 	unsigned int lane_mask =
3107 		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
3108 	u32 val;
3109 
3110 	intel_dp_prepare(encoder);
3111 
3112 	/*
3113 	 * Must trick the second common lane into life.
3114 	 * Otherwise we can't even access the PLL.
3115 	 */
3116 	if (ch == DPIO_CH0 && pipe == PIPE_B)
3117 		dport->release_cl2_override =
3118 			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3119 
3120 	chv_phy_powergate_lanes(encoder, true, lane_mask);
3121 
3122 	mutex_lock(&dev_priv->sb_lock);
3123 
3124 	/* Assert data lane reset */
3125 	chv_data_lane_soft_reset(encoder, true);
3126 
3127 	/* program left/right clock distribution */
3128 	if (pipe != PIPE_B) {
3129 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3130 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3131 		if (ch == DPIO_CH0)
3132 			val |= CHV_BUFLEFTENA1_FORCE;
3133 		if (ch == DPIO_CH1)
3134 			val |= CHV_BUFRIGHTENA1_FORCE;
3135 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3136 	} else {
3137 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3138 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3139 		if (ch == DPIO_CH0)
3140 			val |= CHV_BUFLEFTENA2_FORCE;
3141 		if (ch == DPIO_CH1)
3142 			val |= CHV_BUFRIGHTENA2_FORCE;
3143 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3144 	}
3145 
3146 	/* program clock channel usage */
3147 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3148 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3149 	if (pipe != PIPE_B)
3150 		val &= ~CHV_PCS_USEDCLKCHANNEL;
3151 	else
3152 		val |= CHV_PCS_USEDCLKCHANNEL;
3153 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3154 
3155 	if (intel_crtc->config->lane_count > 2) {
3156 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3157 		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3158 		if (pipe != PIPE_B)
3159 			val &= ~CHV_PCS_USEDCLKCHANNEL;
3160 		else
3161 			val |= CHV_PCS_USEDCLKCHANNEL;
3162 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3163 	}
3164 
3165 	/*
3166 	 * This is a bit weird since generally the CL
3167 	 * matches the pipe, but here we need to
3168 	 * pick the CL based on the port.
3169 	 */
3170 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3171 	if (pipe != PIPE_B)
3172 		val &= ~CHV_CMN_USEDCLKCHANNEL;
3173 	else
3174 		val |= CHV_CMN_USEDCLKCHANNEL;
3175 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3176 
3177 	mutex_unlock(&dev_priv->sb_lock);
3178 }
3179 
3180 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3181 {
3182 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3183 	enum i915_pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3184 	u32 val;
3185 
3186 	mutex_lock(&dev_priv->sb_lock);
3187 
3188 	/* disable left/right clock distribution */
3189 	if (pipe != PIPE_B) {
3190 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3191 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3192 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3193 	} else {
3194 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3195 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3196 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3197 	}
3198 
3199 	mutex_unlock(&dev_priv->sb_lock);
3200 
3201 	/*
3202 	 * Leave the power down bit cleared for at least one
3203 	 * lane so that chv_phy_powergate_ch() will power
3204 	 * on something when the channel is otherwise unused.
3205 	 * When the port is off and the override is removed
3206 	 * the lanes power down anyway, so otherwise it doesn't
3207 	 * really matter what the state of power down bits is
3208 	 * after this.
3209 	 */
3210 	chv_phy_powergate_lanes(encoder, false, 0x0);
3211 }
3212 
3213 /*
3214  * Native read with retry for link status and receiver capability reads for
3215  * cases where the sink may still be asleep.
3216  *
3217  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3218  * supposed to retry 3 times per the spec.
3219  */
3220 static ssize_t
3221 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3222 			void *buffer, size_t size)
3223 {
3224 	ssize_t ret;
3225 	int i;
3226 
3227 	/*
3228 	 * Sometimes we just get the same incorrect byte repeated
3229 	 * over the entire buffer. Doing just one throw-away read
3230 	 * initially seems to "solve" it.
3231 	 */
3232 	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3233 
3234 	for (i = 0; i < 3; i++) {
3235 		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3236 		if (ret == size)
3237 			return ret;
3238 		msleep(1);
3239 	}
3240 
3241 	return ret;
3242 }
3243 
3244 /*
3245  * Fetch AUX CH registers 0x202 - 0x207 which contain
3246  * link status information
3247  */
3248 bool
3249 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3250 {
3251 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3252 				       DP_LANE0_1_STATUS,
3253 				       link_status,
3254 				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3255 }
3256 
3257 /* These are source-specific values. */
3258 uint8_t
3259 intel_dp_voltage_max(struct intel_dp *intel_dp)
3260 {
3261 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3262 	struct drm_i915_private *dev_priv = dev->dev_private;
3263 	enum port port = dp_to_dig_port(intel_dp)->port;
3264 
3265 	if (IS_BROXTON(dev))
3266 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3267 	else if (INTEL_INFO(dev)->gen >= 9) {
3268 		if (dev_priv->edp_low_vswing && port == PORT_A)
3269 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3270 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3271 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
3272 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3273 	else if (IS_GEN7(dev) && port == PORT_A)
3274 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3275 	else if (HAS_PCH_CPT(dev) && port != PORT_A)
3276 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3277 	else
3278 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3279 }
3280 
3281 uint8_t
3282 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3283 {
3284 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3285 	enum port port = dp_to_dig_port(intel_dp)->port;
3286 
3287 	if (INTEL_INFO(dev)->gen >= 9) {
3288 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3289 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3290 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3291 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3292 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3293 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3294 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3295 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3296 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3297 		default:
3298 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3299 		}
3300 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3301 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3302 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3303 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3304 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3305 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3306 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3307 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3308 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3309 		default:
3310 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3311 		}
3312 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3313 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3314 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3315 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3316 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3317 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3318 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3319 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3320 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3321 		default:
3322 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3323 		}
3324 	} else if (IS_GEN7(dev) && port == PORT_A) {
3325 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3326 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3327 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3328 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3329 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3330 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3331 		default:
3332 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3333 		}
3334 	} else {
3335 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3336 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3337 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3338 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3339 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3340 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3341 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3342 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3343 		default:
3344 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3345 		}
3346 	}
3347 }
3348 
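/*
 * How the two limits above combine during link training (a sketch;
 * the training code is the real consumer): clamp the requested swing
 * to intel_dp_voltage_max(), then clamp pre-emphasis to
 * intel_dp_pre_emphasis_max() for that swing. E.g. on gen7 port A a
 * request for swing level 3 collapses to swing level 2 with at most
 * pre-emphasis level 1.
 */
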
3349 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3350 {
3351 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3352 	struct drm_i915_private *dev_priv = dev->dev_private;
3353 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3354 	struct intel_crtc *intel_crtc =
3355 		to_intel_crtc(dport->base.base.crtc);
3356 	unsigned long demph_reg_value, preemph_reg_value,
3357 		uniqtranscale_reg_value;
3358 	uint8_t train_set = intel_dp->train_set[0];
3359 	enum dpio_channel port = vlv_dport_to_channel(dport);
3360 	int pipe = intel_crtc->pipe;
3361 
3362 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3363 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3364 		preemph_reg_value = 0x0004000;
3365 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3366 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3367 			demph_reg_value = 0x2B405555;
3368 			uniqtranscale_reg_value = 0x552AB83A;
3369 			break;
3370 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3371 			demph_reg_value = 0x2B404040;
3372 			uniqtranscale_reg_value = 0x5548B83A;
3373 			break;
3374 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3375 			demph_reg_value = 0x2B245555;
3376 			uniqtranscale_reg_value = 0x5560B83A;
3377 			break;
3378 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3379 			demph_reg_value = 0x2B405555;
3380 			uniqtranscale_reg_value = 0x5598DA3A;
3381 			break;
3382 		default:
3383 			return 0;
3384 		}
3385 		break;
3386 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3387 		preemph_reg_value = 0x0002000;
3388 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3389 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3390 			demph_reg_value = 0x2B404040;
3391 			uniqtranscale_reg_value = 0x5552B83A;
3392 			break;
3393 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3394 			demph_reg_value = 0x2B404848;
3395 			uniqtranscale_reg_value = 0x5580B83A;
3396 			break;
3397 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3398 			demph_reg_value = 0x2B404040;
3399 			uniqtranscale_reg_value = 0x55ADDA3A;
3400 			break;
3401 		default:
3402 			return 0;
3403 		}
3404 		break;
3405 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3406 		preemph_reg_value = 0x0000000;
3407 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3408 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3409 			demph_reg_value = 0x2B305555;
3410 			uniqtranscale_reg_value = 0x5570B83A;
3411 			break;
3412 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3413 			demph_reg_value = 0x2B2B4040;
3414 			uniqtranscale_reg_value = 0x55ADDA3A;
3415 			break;
3416 		default:
3417 			return 0;
3418 		}
3419 		break;
3420 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3421 		preemph_reg_value = 0x0006000;
3422 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3423 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3424 			demph_reg_value = 0x1B405555;
3425 			uniqtranscale_reg_value = 0x55ADDA3A;
3426 			break;
3427 		default:
3428 			return 0;
3429 		}
3430 		break;
3431 	default:
3432 		return 0;
3433 	}
3434 
3435 	mutex_lock(&dev_priv->sb_lock);
3436 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3437 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3438 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3439 			 uniqtranscale_reg_value);
3440 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3441 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3442 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3443 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3444 	mutex_unlock(&dev_priv->sb_lock);
3445 
3446 	return 0;
3447 }
3448 
3449 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3450 {
3451 	return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3452 		(train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3453 }
3454 
3455 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3456 {
3457 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3458 	struct drm_i915_private *dev_priv = dev->dev_private;
3459 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3460 	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3461 	u32 deemph_reg_value, margin_reg_value, val;
3462 	uint8_t train_set = intel_dp->train_set[0];
3463 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3464 	enum i915_pipe pipe = intel_crtc->pipe;
3465 	int i;
3466 
3467 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3468 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3469 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3470 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3471 			deemph_reg_value = 128;
3472 			margin_reg_value = 52;
3473 			break;
3474 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3475 			deemph_reg_value = 128;
3476 			margin_reg_value = 77;
3477 			break;
3478 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3479 			deemph_reg_value = 128;
3480 			margin_reg_value = 102;
3481 			break;
3482 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3483 			deemph_reg_value = 128;
3484 			margin_reg_value = 154;
3485 			/* FIXME: extra setting needed for the 1200 mV swing case */
3486 			break;
3487 		default:
3488 			return 0;
3489 		}
3490 		break;
3491 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3492 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3493 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3494 			deemph_reg_value = 85;
3495 			margin_reg_value = 78;
3496 			break;
3497 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3498 			deemph_reg_value = 85;
3499 			margin_reg_value = 116;
3500 			break;
3501 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3502 			deemph_reg_value = 85;
3503 			margin_reg_value = 154;
3504 			break;
3505 		default:
3506 			return 0;
3507 		}
3508 		break;
3509 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3510 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3511 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3512 			deemph_reg_value = 64;
3513 			margin_reg_value = 104;
3514 			break;
3515 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3516 			deemph_reg_value = 64;
3517 			margin_reg_value = 154;
3518 			break;
3519 		default:
3520 			return 0;
3521 		}
3522 		break;
3523 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3524 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3525 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3526 			deemph_reg_value = 43;
3527 			margin_reg_value = 154;
3528 			break;
3529 		default:
3530 			return 0;
3531 		}
3532 		break;
3533 	default:
3534 		return 0;
3535 	}
3536 
3537 	mutex_lock(&dev_priv->sb_lock);
3538 
3539 	/* Clear calc init */
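	/*
	 * DPIO programming sequence: clear the swing-calc init bits,
	 * zero the TX margins, program per-lane de-emphasis and margin,
	 * select the unique transition scale if needed, and finally
	 * restart the swing calculation.
	 */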
3540 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3541 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3542 	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3543 	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3544 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3545 
3546 	if (intel_crtc->config->lane_count > 2) {
3547 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3548 		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3549 		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3550 		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3551 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3552 	}
3553 
3554 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3555 	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3556 	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3557 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3558 
3559 	if (intel_crtc->config->lane_count > 2) {
3560 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3561 		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3562 		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3563 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3564 	}
3565 
3566 	/* Program swing deemph */
3567 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3568 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3569 		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3570 		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3571 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3572 	}
3573 
3574 	/* Program swing margin */
3575 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3576 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3577 
3578 		val &= ~DPIO_SWING_MARGIN000_MASK;
3579 		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3580 
3581 		/*
3582 		 * Supposedly this value shouldn't matter when unique transition
3583 		 * scale is disabled, but in fact it does matter. Let's just
3584 		 * always program the same value and hope it's OK.
3585 		 */
3586 		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3587 		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3588 
3589 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3590 	}
3591 
3592 	/*
3593 	 * The documentation says to set bit 27 for ch0 and bit 26
3594 	 * for ch1, which might be a typo in the doc.
3595 	 * For now, for this unique transition scale selection, set bit
3596 	 * 27 for both ch0 and ch1.
3597 	 */
3598 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3599 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3600 		if (chv_need_uniq_trans_scale(train_set))
3601 			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3602 		else
3603 			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3604 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3605 	}
3606 
3607 	/* Start swing calculation */
3608 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3609 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3610 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3611 
3612 	if (intel_crtc->config->lane_count > 2) {
3613 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3614 		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3615 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3616 	}
3617 
3618 	mutex_unlock(&dev_priv->sb_lock);
3619 
3620 	return 0;
3621 }
3622 
3623 static uint32_t
3624 gen4_signal_levels(uint8_t train_set)
3625 {
3626 	uint32_t	signal_levels = 0;
3627 
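	/* On gen4 the vswing and pre-emphasis fields live directly in the
	 * DP port register, so the two encodings are simply ORed together. */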
3628 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3629 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3630 	default:
3631 		signal_levels |= DP_VOLTAGE_0_4;
3632 		break;
3633 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3634 		signal_levels |= DP_VOLTAGE_0_6;
3635 		break;
3636 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3637 		signal_levels |= DP_VOLTAGE_0_8;
3638 		break;
3639 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3640 		signal_levels |= DP_VOLTAGE_1_2;
3641 		break;
3642 	}
3643 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3644 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3645 	default:
3646 		signal_levels |= DP_PRE_EMPHASIS_0;
3647 		break;
3648 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3649 		signal_levels |= DP_PRE_EMPHASIS_3_5;
3650 		break;
3651 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3652 		signal_levels |= DP_PRE_EMPHASIS_6;
3653 		break;
3654 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3655 		signal_levels |= DP_PRE_EMPHASIS_9_5;
3656 		break;
3657 	}
3658 	return signal_levels;
3659 }
3660 
3661 /* Gen6's DP voltage swing and pre-emphasis control */
3662 static uint32_t
3663 gen6_edp_signal_levels(uint8_t train_set)
3664 {
3665 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3666 					 DP_TRAIN_PRE_EMPHASIS_MASK);
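	/*
	 * SNB exposes only a few discrete eDP drive settings, so several
	 * requested swing/pre-emphasis combinations are coalesced onto
	 * the same register value below.
	 */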
3667 	switch (signal_levels) {
3668 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3669 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3670 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3671 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3672 		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3673 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3674 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3675 		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3676 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3677 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3678 		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3679 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3680 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3681 		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3682 	default:
3683 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3684 			      "0x%x\n", signal_levels);
3685 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3686 	}
3687 }
3688 
3689 /* Gen7's DP voltage swing and pre-emphasis control */
3690 static uint32_t
3691 gen7_edp_signal_levels(uint8_t train_set)
3692 {
3693 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3694 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3695 	switch (signal_levels) {
3696 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3697 		return EDP_LINK_TRAIN_400MV_0DB_IVB;
3698 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3699 		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3700 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3701 		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3702 
3703 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3704 		return EDP_LINK_TRAIN_600MV_0DB_IVB;
3705 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3706 		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3707 
3708 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3709 		return EDP_LINK_TRAIN_800MV_0DB_IVB;
3710 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3711 		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3712 
3713 	default:
3714 		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3715 			      "0x%x\n", signal_levels);
3716 		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3717 	}
3718 }
3719 
3720 void
3721 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3722 {
3723 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3724 	enum port port = intel_dig_port->port;
3725 	struct drm_device *dev = intel_dig_port->base.base.dev;
3726 	struct drm_i915_private *dev_priv = to_i915(dev);
3727 	uint32_t signal_levels, mask = 0;
3728 	uint8_t train_set = intel_dp->train_set[0];
3729 
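	/*
	 * Pick the platform-specific encoding. VLV/CHV program the levels
	 * through DPIO inside their helpers and BXT programs its DDI
	 * buffer translations elsewhere, so in those cases mask stays 0
	 * and the port register update below is effectively a no-op.
	 */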
3730 	if (HAS_DDI(dev)) {
3731 		signal_levels = ddi_signal_levels(intel_dp);
3732 
3733 		if (IS_BROXTON(dev))
3734 			signal_levels = 0;
3735 		else
3736 			mask = DDI_BUF_EMP_MASK;
3737 	} else if (IS_CHERRYVIEW(dev)) {
3738 		signal_levels = chv_signal_levels(intel_dp);
3739 	} else if (IS_VALLEYVIEW(dev)) {
3740 		signal_levels = vlv_signal_levels(intel_dp);
3741 	} else if (IS_GEN7(dev) && port == PORT_A) {
3742 		signal_levels = gen7_edp_signal_levels(train_set);
3743 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3744 	} else if (IS_GEN6(dev) && port == PORT_A) {
3745 		signal_levels = gen6_edp_signal_levels(train_set);
3746 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3747 	} else {
3748 		signal_levels = gen4_signal_levels(train_set);
3749 		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3750 	}
3751 
3752 	if (mask)
3753 		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3754 
3755 	DRM_DEBUG_KMS("Using vswing level %d\n",
3756 		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3757 	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3758 		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3759 			DP_TRAIN_PRE_EMPHASIS_SHIFT);
3760 
3761 	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3762 
3763 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3764 	POSTING_READ(intel_dp->output_reg);
3765 }
3766 
3767 void
3768 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3769 				       uint8_t dp_train_pat)
3770 {
3771 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3772 	struct drm_i915_private *dev_priv =
3773 		to_i915(intel_dig_port->base.base.dev);
3774 
3775 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3776 
3777 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3778 	POSTING_READ(intel_dp->output_reg);
3779 }
3780 
3781 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3782 {
3783 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3784 	struct drm_device *dev = intel_dig_port->base.base.dev;
3785 	struct drm_i915_private *dev_priv = dev->dev_private;
3786 	enum port port = intel_dig_port->port;
3787 	uint32_t val;
3788 
3789 	if (!HAS_DDI(dev))
3790 		return;
3791 
3792 	val = I915_READ(DP_TP_CTL(port));
3793 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3794 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3795 	I915_WRITE(DP_TP_CTL(port), val);
3796 
3797 	/*
3798 	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3799 	 * we need to set idle transmission mode is to work around a HW issue
3800 	 * where we enable the pipe while not in idle link-training mode.
3801 	 * In that case there is a requirement to wait for a minimum number
3802 	 * of idle patterns to be sent.
3803 	 */
3804 	if (port == PORT_A)
3805 		return;
3806 
3807 	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3808 		     1))
3809 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3810 }
3811 
3812 static void
3813 intel_dp_link_down(struct intel_dp *intel_dp)
3814 {
3815 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3816 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3817 	enum port port = intel_dig_port->port;
3818 	struct drm_device *dev = intel_dig_port->base.base.dev;
3819 	struct drm_i915_private *dev_priv = dev->dev_private;
3820 	uint32_t DP = intel_dp->DP;
3821 
3822 	if (WARN_ON(HAS_DDI(dev)))
3823 		return;
3824 
3825 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3826 		return;
3827 
3828 	DRM_DEBUG_KMS("\n");
3829 
3830 	if ((IS_GEN7(dev) && port == PORT_A) ||
3831 	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
3832 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3833 		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3834 	} else {
3835 		if (IS_CHERRYVIEW(dev))
3836 			DP &= ~DP_LINK_TRAIN_MASK_CHV;
3837 		else
3838 			DP &= ~DP_LINK_TRAIN_MASK;
3839 		DP |= DP_LINK_TRAIN_PAT_IDLE;
3840 	}
3841 	I915_WRITE(intel_dp->output_reg, DP);
3842 	POSTING_READ(intel_dp->output_reg);
3843 
3844 	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3845 	I915_WRITE(intel_dp->output_reg, DP);
3846 	POSTING_READ(intel_dp->output_reg);
3847 
3848 	/*
3849 	 * HW workaround for IBX, we need to move the port
3850 	 * to transcoder A after disabling it to allow the
3851 	 * matching HDMI port to be enabled on transcoder A.
3852 	 */
3853 	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3854 		/*
3855 		 * We get CPU/PCH FIFO underruns on the other pipe when
3856 		 * doing the workaround. Sweep them under the rug.
3857 		 */
3858 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3859 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3860 
3861 		/* always enable with pattern 1 (as per spec) */
3862 		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3863 		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3864 		I915_WRITE(intel_dp->output_reg, DP);
3865 		POSTING_READ(intel_dp->output_reg);
3866 
3867 		DP &= ~DP_PORT_EN;
3868 		I915_WRITE(intel_dp->output_reg, DP);
3869 		POSTING_READ(intel_dp->output_reg);
3870 
3871 		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3872 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3873 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3874 	}
3875 
3876 	msleep(intel_dp->panel_power_down_delay);
3877 
3878 	intel_dp->DP = DP;
3879 }
3880 
3881 static bool
3882 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3883 {
3884 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3885 	struct drm_device *dev = dig_port->base.base.dev;
3886 	struct drm_i915_private *dev_priv = dev->dev_private;
3887 	uint8_t rev;
3888 
3889 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3890 				    sizeof(intel_dp->dpcd)) < 0)
3891 		return false; /* aux transfer failed */
3892 
3893 #ifdef __DragonFly__
3894 	char dpcd_hex_dump[DP_RECEIVER_CAP_SIZE * 3];
3895 	DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(intel_dp->dpcd, sizeof(intel_dp->dpcd),
3896 		      dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
3897 #else
3898 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3899 #endif
3900 
3901 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3902 		return false; /* DPCD not present */
3903 
3904 	/* Check if the panel supports PSR */
3905 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3906 	if (is_edp(intel_dp)) {
3907 		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3908 					intel_dp->psr_dpcd,
3909 					sizeof(intel_dp->psr_dpcd));
3910 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3911 			dev_priv->psr.sink_support = true;
3912 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3913 		}
3914 
3915 		if (INTEL_INFO(dev)->gen >= 9 &&
3916 		    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3917 			uint8_t frame_sync_cap;
3918 
3919 			dev_priv->psr.sink_support = true;
3920 			intel_dp_dpcd_read_wake(&intel_dp->aux,
3921 					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3922 					&frame_sync_cap, 1);
3923 			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3924 			/* PSR2 needs frame sync as well */
3925 			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3926 			DRM_DEBUG_KMS("PSR2 %s on sink\n",
3927 				dev_priv->psr.psr2_support ? "supported" : "not supported");
3928 		}
3929 	}
3930 
3931 	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3932 		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
3933 		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3934 
3935 	/* Intermediate frequency support */
3936 	if (is_edp(intel_dp) &&
3937 	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3938 	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3939 	    (rev >= 0x03)) { /* eDP v1.4 or higher */
3940 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3941 		int i;
3942 
3943 		intel_dp_dpcd_read_wake(&intel_dp->aux,
3944 				DP_SUPPORTED_LINK_RATES,
3945 				sink_rates,
3946 				sizeof(sink_rates));
3947 
3948 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3949 			int val = le16_to_cpu(sink_rates[i]);
3950 
3951 			if (val == 0)
3952 				break;
3953 
3954 			/* DPCD rate is in 200 kHz units while the drm clock is in deca-kHz */
3955 			intel_dp->sink_rates[i] = (val * 200) / 10;
3956 		}
3957 		intel_dp->num_sink_rates = i;
3958 	}
3959 
3960 	intel_dp_print_rates(intel_dp);
3961 
3962 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3963 	      DP_DWN_STRM_PORT_PRESENT))
3964 		return true; /* native DP sink */
3965 
3966 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3967 		return true; /* no per-port downstream info */
3968 
3969 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3970 				    intel_dp->downstream_ports,
3971 				    DP_MAX_DOWNSTREAM_PORTS) < 0)
3972 		return false; /* downstream port status fetch failed */
3973 
3974 	return true;
3975 }
3976 
3977 static void
3978 intel_dp_probe_oui(struct intel_dp *intel_dp)
3979 {
3980 	u8 buf[3];
3981 
3982 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3983 		return;
3984 
3985 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3986 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3987 			      buf[0], buf[1], buf[2]);
3988 
3989 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3990 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3991 			      buf[0], buf[1], buf[2]);
3992 }
3993 
3994 static bool
3995 intel_dp_probe_mst(struct intel_dp *intel_dp)
3996 {
3997 	u8 buf[1];
3998 
3999 	if (!intel_dp->can_mst)
4000 		return false;
4001 
4002 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4003 		return false;
4004 
4005 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) {
4006 		if (buf[0] & DP_MST_CAP) {
4007 			DRM_DEBUG_KMS("Sink is MST capable\n");
4008 			intel_dp->is_mst = true;
4009 		} else {
4010 			DRM_DEBUG_KMS("Sink is not MST capable\n");
4011 			intel_dp->is_mst = false;
4012 		}
4013 	}
4014 
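	/*
	 * MST topology management is compiled out in this port for now,
	 * so report no MST regardless of what the sink advertises.
	 */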
4015 #if 0
4016 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4017 	return intel_dp->is_mst;
4018 #else
4019 	return false;
4020 #endif
4021 }
4022 
4023 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4024 {
4025 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4026 	struct drm_device *dev = dig_port->base.base.dev;
4027 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4028 	u8 buf;
4029 	int ret = 0;
4030 	int count = 0;
4031 	int attempts = 10;
4032 
4033 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4034 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4035 		ret = -EIO;
4036 		goto out;
4037 	}
4038 
4039 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4040 			       buf & ~DP_TEST_SINK_START) < 0) {
4041 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4042 		ret = -EIO;
4043 		goto out;
4044 	}
4045 
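	/* Give the sink up to 10 vblanks for its CRC test counter to
	 * drain back to zero after stopping the test. */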
4046 	do {
4047 		intel_wait_for_vblank(dev, intel_crtc->pipe);
4048 
4049 		if (drm_dp_dpcd_readb(&intel_dp->aux,
4050 				      DP_TEST_SINK_MISC, &buf) < 0) {
4051 			ret = -EIO;
4052 			goto out;
4053 		}
4054 		count = buf & DP_TEST_COUNT_MASK;
4055 	} while (--attempts && count);
4056 
4057 	if (attempts == 0) {
4058 		DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
4059 		ret = -ETIMEDOUT;
4060 	}
4061 
4062  out:
4063 	hsw_enable_ips(intel_crtc);
4064 	return ret;
4065 }
4066 
4067 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4068 {
4069 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4070 	struct drm_device *dev = dig_port->base.base.dev;
4071 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4072 	u8 buf;
4073 	int ret;
4074 
4075 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4076 		return -EIO;
4077 
4078 	if (!(buf & DP_TEST_CRC_SUPPORTED))
4079 		return -ENOTTY;
4080 
4081 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4082 		return -EIO;
4083 
4084 	if (buf & DP_TEST_SINK_START) {
4085 		ret = intel_dp_sink_crc_stop(intel_dp);
4086 		if (ret)
4087 			return ret;
4088 	}
4089 
4090 	hsw_disable_ips(intel_crtc);
4091 
4092 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4093 			       buf | DP_TEST_SINK_START) < 0) {
4094 		hsw_enable_ips(intel_crtc);
4095 		return -EIO;
4096 	}
4097 
4098 	intel_wait_for_vblank(dev, intel_crtc->pipe);
4099 	return 0;
4100 }
4101 
4102 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4103 {
4104 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4105 	struct drm_device *dev = dig_port->base.base.dev;
4106 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4107 	u8 buf;
4108 	int count, ret;
4109 	int attempts = 6;
4110 
4111 	ret = intel_dp_sink_crc_start(intel_dp);
4112 	if (ret)
4113 		return ret;
4114 
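	/* Give the sink up to 6 vblanks to accumulate at least one CRC. */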
4115 	do {
4116 		intel_wait_for_vblank(dev, intel_crtc->pipe);
4117 
4118 		if (drm_dp_dpcd_readb(&intel_dp->aux,
4119 				      DP_TEST_SINK_MISC, &buf) < 0) {
4120 			ret = -EIO;
4121 			goto stop;
4122 		}
4123 		count = buf & DP_TEST_COUNT_MASK;
4124 
4125 	} while (--attempts && count == 0);
4126 
4127 	if (attempts == 0) {
4128 		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4129 		ret = -ETIMEDOUT;
4130 		goto stop;
4131 	}
4132 
4133 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4134 		ret = -EIO;
4135 		goto stop;
4136 	}
4137 
4138 stop:
4139 	intel_dp_sink_crc_stop(intel_dp);
4140 	return ret;
4141 }
4142 
4143 static bool
4144 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4145 {
4146 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
4147 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
4148 				       sink_irq_vector, 1) == 1;
4149 }
4150 
4151 static bool
4152 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4153 {
4154 	int ret;
4155 
4156 	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4157 					     DP_SINK_COUNT_ESI,
4158 					     sink_irq_vector, 14);
4159 	if (ret != 14)
4160 		return false;
4161 
4162 	return true;
4163 }
4164 
4165 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4166 {
4167 	uint8_t test_result = DP_TEST_ACK;
4168 	return test_result;
4169 }
4170 
4171 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4172 {
4173 	uint8_t test_result = DP_TEST_NAK;
4174 	return test_result;
4175 }
4176 
4177 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4178 {
4179 	uint8_t test_result = DP_TEST_NAK;
4180 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4181 	struct drm_connector *connector = &intel_connector->base;
4182 
4183 	if (intel_connector->detect_edid == NULL ||
4184 	    connector->edid_corrupt ||
4185 	    intel_dp->aux.i2c_defer_count > 6) {
4186 		/* Check EDID read for NACKs, DEFERs and corruption
4187 		 * (DP CTS 1.2 Core r1.1)
4188 		 *    4.2.2.4 : Failed EDID read, I2C_NAK
4189 		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
4190 		 *    4.2.2.6 : EDID corruption detected
4191 		 * Use failsafe mode for all cases
4192 		 */
4193 		if (intel_dp->aux.i2c_nack_count > 0 ||
4194 		    intel_dp->aux.i2c_defer_count > 0)
4195 			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4196 				      intel_dp->aux.i2c_nack_count,
4197 				      intel_dp->aux.i2c_defer_count);
4198 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4199 	} else {
4200 		struct edid *block = intel_connector->detect_edid;
4201 
4202 		/* We have to write the checksum
4203 		 * of the last block read
4204 		 */
4205 		block += intel_connector->detect_edid->extensions;
4206 
4207 		if (!drm_dp_dpcd_write(&intel_dp->aux,
4208 					DP_TEST_EDID_CHECKSUM,
4209 					&block->checksum,
4210 					1))
4211 			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4212 
4213 		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4214 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4215 	}
4216 
4217 	/* Set test active flag here so userspace doesn't interrupt things */
4218 	intel_dp->compliance_test_active = 1;
4219 
4220 	return test_result;
4221 }
4222 
4223 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4224 {
4225 	uint8_t test_result = DP_TEST_NAK;
4226 	return test_result;
4227 }
4228 
4229 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4230 {
4231 	uint8_t response = DP_TEST_NAK;
4232 	uint8_t rxdata = 0;
4233 	int status = 0;
4234 
4235 	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4236 	if (status <= 0) {
4237 		DRM_DEBUG_KMS("Could not read test request from sink\n");
4238 		goto update_status;
4239 	}
4240 
4241 	switch (rxdata) {
4242 	case DP_TEST_LINK_TRAINING:
4243 		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4244 		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4245 		response = intel_dp_autotest_link_training(intel_dp);
4246 		break;
4247 	case DP_TEST_LINK_VIDEO_PATTERN:
4248 		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4249 		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4250 		response = intel_dp_autotest_video_pattern(intel_dp);
4251 		break;
4252 	case DP_TEST_LINK_EDID_READ:
4253 		DRM_DEBUG_KMS("EDID test requested\n");
4254 		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4255 		response = intel_dp_autotest_edid(intel_dp);
4256 		break;
4257 	case DP_TEST_LINK_PHY_TEST_PATTERN:
4258 		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4259 		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4260 		response = intel_dp_autotest_phy_pattern(intel_dp);
4261 		break;
4262 	default:
4263 		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4264 		break;
4265 	}
4266 
4267 update_status:
4268 	status = drm_dp_dpcd_write(&intel_dp->aux,
4269 				   DP_TEST_RESPONSE,
4270 				   &response, 1);
4271 	if (status <= 0)
4272 		DRM_DEBUG_KMS("Could not write test response to sink\n");
4273 }
4274 
4275 static int
4276 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4277 {
4278 	bool bret;
4279 
4280 	if (intel_dp->is_mst) {
4281 		u8 esi[16] = { 0 };
4282 		int ret = 0;
4283 		int retry;
4284 		bool handled;
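		/*
		 * Keep servicing ESI interrupts until none are pending,
		 * acking each processed batch back to the sink.
		 */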
4285 		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4286 go_again:
4287 		if (bret) {
4288 
4289 			/* check link status - esi[10] = 0x200c */
4290 			if (intel_dp->active_mst_links &&
4291 			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4292 				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4293 				intel_dp_start_link_train(intel_dp);
4294 				intel_dp_stop_link_train(intel_dp);
4295 			}
4296 
4297 			DRM_DEBUG_KMS("got esi %3ph\n", esi);
4298 			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4299 
4300 			if (handled) {
4301 				for (retry = 0; retry < 3; retry++) {
4302 					int wret;
4303 					wret = drm_dp_dpcd_write(&intel_dp->aux,
4304 								 DP_SINK_COUNT_ESI+1,
4305 								 &esi[1], 3);
4306 					if (wret == 3) {
4307 						break;
4308 					}
4309 				}
4310 
4311 				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4312 				if (bret) {
4313 					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4314 					goto go_again;
4315 				}
4316 			} else
4317 				ret = 0;
4318 
4319 			return ret;
4320 		} else {
4321 			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4322 			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4323 			intel_dp->is_mst = false;
4324 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4325 			/* send a hotplug event */
4326 			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4327 		}
4328 	}
4329 	return -EINVAL;
4330 }
4331 
4332 /*
4333  * According to DP spec
4334  * 5.1.2:
4335  *  1. Read DPCD
4336  *  2. Configure link according to Receiver Capabilities
4337  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4338  *  4. Check link status on receipt of hot-plug interrupt
4339  */
4340 static void
4341 intel_dp_check_link_status(struct intel_dp *intel_dp)
4342 {
4343 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4344 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4345 	u8 sink_irq_vector;
4346 	u8 link_status[DP_LINK_STATUS_SIZE];
4347 
4348 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4349 
4350 	/*
4351 	 * Clear the compliance test variables to allow capturing
4352 	 * the values for the next automated test request.
4353 	 */
4354 	intel_dp->compliance_test_active = 0;
4355 	intel_dp->compliance_test_type = 0;
4356 	intel_dp->compliance_test_data = 0;
4357 
4358 	if (!intel_encoder->base.crtc)
4359 		return;
4360 
4361 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4362 		return;
4363 
4364 	/* Try to read receiver status if the link appears to be up */
4365 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
4366 		return;
4367 	}
4368 
4369 	/* Now read the DPCD to see if it's actually running */
4370 	if (!intel_dp_get_dpcd(intel_dp)) {
4371 		return;
4372 	}
4373 
4374 	/* Try to read the source of the interrupt */
4375 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4376 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4377 		/* Clear interrupt source */
4378 		drm_dp_dpcd_writeb(&intel_dp->aux,
4379 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4380 				   sink_irq_vector);
4381 
4382 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4383 			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4384 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4385 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4386 	}
4387 
4388 	/* if link training is requested we should always perform it */
4389 	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4390 	    (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4391 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4392 			      intel_encoder->base.name);
4393 		intel_dp_start_link_train(intel_dp);
4394 		intel_dp_stop_link_train(intel_dp);
4395 	}
4396 }
4397 
4398 /* XXX this is probably wrong for multiple downstream ports */
4399 static enum drm_connector_status
4400 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4401 {
4402 	uint8_t *dpcd = intel_dp->dpcd;
4403 	uint8_t type;
4404 
4405 	if (!intel_dp_get_dpcd(intel_dp))
4406 		return connector_status_disconnected;
4407 
4408 	/* if there's no downstream port, we're done */
4409 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4410 		return connector_status_connected;
4411 
4412 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4413 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4414 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4415 		uint8_t reg;
4416 
4417 		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4418 					    &reg, 1) < 0)
4419 			return connector_status_unknown;
4420 
4421 		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4422 					      : connector_status_disconnected;
4423 	}
4424 
4425 	/* If no HPD, poke DDC gently */
4426 	if (drm_probe_ddc(&intel_dp->aux.ddc))
4427 		return connector_status_connected;
4428 
4429 	/* Well we tried, say unknown for unreliable port types */
4430 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4431 		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4432 		if (type == DP_DS_PORT_TYPE_VGA ||
4433 		    type == DP_DS_PORT_TYPE_NON_EDID)
4434 			return connector_status_unknown;
4435 	} else {
4436 		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4437 			DP_DWN_STRM_PORT_TYPE_MASK;
4438 		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4439 		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
4440 			return connector_status_unknown;
4441 	}
4442 
4443 	/* Anything else is out of spec, warn and ignore */
4444 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4445 	return connector_status_disconnected;
4446 }
4447 
4448 static enum drm_connector_status
4449 edp_detect(struct intel_dp *intel_dp)
4450 {
4451 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4452 	enum drm_connector_status status;
4453 
4454 	status = intel_panel_detect(dev);
4455 	if (status == connector_status_unknown)
4456 		status = connector_status_connected;
4457 
4458 	return status;
4459 }
4460 
4461 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4462 				       struct intel_digital_port *port)
4463 {
4464 	u32 bit;
4465 
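	/*
	 * Port A (eDP) has no live-status bit in SDEISR on IBX, so it is
	 * assumed to always be connected.
	 */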
4466 	switch (port->port) {
4467 	case PORT_A:
4468 		return true;
4469 	case PORT_B:
4470 		bit = SDE_PORTB_HOTPLUG;
4471 		break;
4472 	case PORT_C:
4473 		bit = SDE_PORTC_HOTPLUG;
4474 		break;
4475 	case PORT_D:
4476 		bit = SDE_PORTD_HOTPLUG;
4477 		break;
4478 	default:
4479 		MISSING_CASE(port->port);
4480 		return false;
4481 	}
4482 
4483 	return I915_READ(SDEISR) & bit;
4484 }
4485 
4486 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4487 				       struct intel_digital_port *port)
4488 {
4489 	u32 bit;
4490 
4491 	switch (port->port) {
4492 	case PORT_A:
4493 		return true;
4494 	case PORT_B:
4495 		bit = SDE_PORTB_HOTPLUG_CPT;
4496 		break;
4497 	case PORT_C:
4498 		bit = SDE_PORTC_HOTPLUG_CPT;
4499 		break;
4500 	case PORT_D:
4501 		bit = SDE_PORTD_HOTPLUG_CPT;
4502 		break;
4503 	case PORT_E:
4504 		bit = SDE_PORTE_HOTPLUG_SPT;
4505 		break;
4506 	default:
4507 		MISSING_CASE(port->port);
4508 		return false;
4509 	}
4510 
4511 	return I915_READ(SDEISR) & bit;
4512 }
4513 
4514 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4515 				       struct intel_digital_port *port)
4516 {
4517 	u32 bit;
4518 
4519 	switch (port->port) {
4520 	case PORT_B:
4521 		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4522 		break;
4523 	case PORT_C:
4524 		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4525 		break;
4526 	case PORT_D:
4527 		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4528 		break;
4529 	default:
4530 		MISSING_CASE(port->port);
4531 		return false;
4532 	}
4533 
4534 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4535 }
4536 
4537 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4538 					struct intel_digital_port *port)
4539 {
4540 	u32 bit;
4541 
4542 	switch (port->port) {
4543 	case PORT_B:
4544 		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4545 		break;
4546 	case PORT_C:
4547 		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4548 		break;
4549 	case PORT_D:
4550 		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4551 		break;
4552 	default:
4553 		MISSING_CASE(port->port);
4554 		return false;
4555 	}
4556 
4557 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4558 }
4559 
4560 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4561 				       struct intel_digital_port *intel_dig_port)
4562 {
4563 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4564 	enum port port;
4565 	u32 bit;
4566 
4567 	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4568 	switch (port) {
4569 	case PORT_A:
4570 		bit = BXT_DE_PORT_HP_DDIA;
4571 		break;
4572 	case PORT_B:
4573 		bit = BXT_DE_PORT_HP_DDIB;
4574 		break;
4575 	case PORT_C:
4576 		bit = BXT_DE_PORT_HP_DDIC;
4577 		break;
4578 	default:
4579 		MISSING_CASE(port);
4580 		return false;
4581 	}
4582 
4583 	return I915_READ(GEN8_DE_PORT_ISR) & bit;
4584 }
4585 
4586 /*
4587  * intel_digital_port_connected - is the specified port connected?
4588  * @dev_priv: i915 private structure
4589  * @port: the port to test
4590  *
4591  * Return %true if @port is connected, %false otherwise.
4592  */
4593 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4594 					 struct intel_digital_port *port)
4595 {
4596 	if (HAS_PCH_IBX(dev_priv))
4597 		return ibx_digital_port_connected(dev_priv, port);
4598 	else if (HAS_PCH_SPLIT(dev_priv))
4599 		return cpt_digital_port_connected(dev_priv, port);
4600 	else if (IS_BROXTON(dev_priv))
4601 		return bxt_digital_port_connected(dev_priv, port);
4602 	else if (IS_GM45(dev_priv))
4603 		return gm45_digital_port_connected(dev_priv, port);
4604 	else
4605 		return g4x_digital_port_connected(dev_priv, port);
4606 }
4607 
4608 static struct edid *
4609 intel_dp_get_edid(struct intel_dp *intel_dp)
4610 {
4611 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4612 
4613 	/* use cached edid if we have one */
4614 	if (intel_connector->edid) {
4615 		/* invalid edid */
4616 		if (IS_ERR(intel_connector->edid))
4617 			return NULL;
4618 
4619 		return drm_edid_duplicate(intel_connector->edid);
4620 	} else
4621 		return drm_get_edid(&intel_connector->base,
4622 				    &intel_dp->aux.ddc);
4623 }
4624 
4625 static void
4626 intel_dp_set_edid(struct intel_dp *intel_dp)
4627 {
4628 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4629 	struct edid *edid;
4630 
4631 	edid = intel_dp_get_edid(intel_dp);
4632 	intel_connector->detect_edid = edid;
4633 
4634 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4635 		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4636 	else
4637 		intel_dp->has_audio = drm_detect_monitor_audio(edid);
4638 }
4639 
4640 static void
4641 intel_dp_unset_edid(struct intel_dp *intel_dp)
4642 {
4643 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4644 
4645 	kfree(intel_connector->detect_edid);
4646 	intel_connector->detect_edid = NULL;
4647 
4648 	intel_dp->has_audio = false;
4649 }
4650 
4651 static enum drm_connector_status
4652 intel_dp_detect(struct drm_connector *connector, bool force)
4653 {
4654 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4655 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4656 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4657 	struct drm_device *dev = connector->dev;
4658 	enum drm_connector_status status;
4659 	enum intel_display_power_domain power_domain;
4660 	bool ret;
4661 	u8 sink_irq_vector;
4662 
4663 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4664 		      connector->base.id, connector->name);
4665 	intel_dp_unset_edid(intel_dp);
4666 
4667 	if (intel_dp->is_mst) {
4668 		/* MST devices are disconnected from a monitor POV */
4669 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4670 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4671 		return connector_status_disconnected;
4672 	}
4673 
4674 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
4675 	intel_display_power_get(to_i915(dev), power_domain);
4676 
4677 	/* Can't disconnect eDP, but you can close the lid... */
4678 	if (is_edp(intel_dp))
4679 		status = edp_detect(intel_dp);
4680 	else if (intel_digital_port_connected(to_i915(dev),
4681 					      dp_to_dig_port(intel_dp)))
4682 		status = intel_dp_detect_dpcd(intel_dp);
4683 	else
4684 		status = connector_status_disconnected;
4685 
4686 	if (status != connector_status_connected) {
4687 		intel_dp->compliance_test_active = 0;
4688 		intel_dp->compliance_test_type = 0;
4689 		intel_dp->compliance_test_data = 0;
4690 
4691 		goto out;
4692 	}
4693 
4694 	intel_dp_probe_oui(intel_dp);
4695 
4696 	ret = intel_dp_probe_mst(intel_dp);
4697 	if (ret) {
4698 		/* If we are in MST mode then this connector won't
4699 		 * appear connected or have anything with EDID on it. */
4700 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4701 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4702 		status = connector_status_disconnected;
4703 		goto out;
4704 	}
4705 
4706 	/*
4707 	 * Clear the NACK and defer counts so that their exact values
4708 	 * during the EDID read can be captured, as required by
4709 	 * Compliance tests 4.2.2.4 and 4.2.2.5.
4710 	 */
4711 	intel_dp->aux.i2c_nack_count = 0;
4712 	intel_dp->aux.i2c_defer_count = 0;
4713 
4714 	intel_dp_set_edid(intel_dp);
4715 
4716 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4717 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4718 	status = connector_status_connected;
4719 
4720 	/* Try to read the source of the interrupt */
4721 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4722 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4723 		/* Clear interrupt source */
4724 		drm_dp_dpcd_writeb(&intel_dp->aux,
4725 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4726 				   sink_irq_vector);
4727 
4728 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4729 			intel_dp_handle_test_request(intel_dp);
4730 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4731 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4732 	}
4733 
4734 out:
4735 	intel_display_power_put(to_i915(dev), power_domain);
4736 	return status;
4737 }
4738 
4739 static void
4740 intel_dp_force(struct drm_connector *connector)
4741 {
4742 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4743 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4744 	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4745 	enum intel_display_power_domain power_domain;
4746 
4747 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4748 		      connector->base.id, connector->name);
4749 	intel_dp_unset_edid(intel_dp);
4750 
4751 	if (connector->status != connector_status_connected)
4752 		return;
4753 
4754 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
4755 	intel_display_power_get(dev_priv, power_domain);
4756 
4757 	intel_dp_set_edid(intel_dp);
4758 
4759 	intel_display_power_put(dev_priv, power_domain);
4760 
4761 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4762 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4763 }
4764 
4765 static int intel_dp_get_modes(struct drm_connector *connector)
4766 {
4767 	struct intel_connector *intel_connector = to_intel_connector(connector);
4768 	struct edid *edid;
4769 
4770 	edid = intel_connector->detect_edid;
4771 	if (edid) {
4772 		int ret = intel_connector_update_modes(connector, edid);
4773 		if (ret)
4774 			return ret;
4775 	}
4776 
4777 	/* if eDP has no EDID, fall back to fixed mode */
4778 	if (is_edp(intel_attached_dp(connector)) &&
4779 	    intel_connector->panel.fixed_mode) {
4780 		struct drm_display_mode *mode;
4781 
4782 		mode = drm_mode_duplicate(connector->dev,
4783 					  intel_connector->panel.fixed_mode);
4784 		if (mode) {
4785 			drm_mode_probed_add(connector, mode);
4786 			return 1;
4787 		}
4788 	}
4789 
4790 	return 0;
4791 }
4792 
4793 static bool
4794 intel_dp_detect_audio(struct drm_connector *connector)
4795 {
4796 	bool has_audio = false;
4797 	struct edid *edid;
4798 
4799 	edid = to_intel_connector(connector)->detect_edid;
4800 	if (edid)
4801 		has_audio = drm_detect_monitor_audio(edid);
4802 
4803 	return has_audio;
4804 }
4805 
4806 static int
4807 intel_dp_set_property(struct drm_connector *connector,
4808 		      struct drm_property *property,
4809 		      uint64_t val)
4810 {
4811 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4812 	struct intel_connector *intel_connector = to_intel_connector(connector);
4813 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4814 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4815 	int ret;
4816 
4817 	ret = drm_object_property_set_value(&connector->base, property, val);
4818 	if (ret)
4819 		return ret;
4820 
4821 	if (property == dev_priv->force_audio_property) {
4822 		int i = val;
4823 		bool has_audio;
4824 
4825 		if (i == intel_dp->force_audio)
4826 			return 0;
4827 
4828 		intel_dp->force_audio = i;
4829 
4830 		if (i == HDMI_AUDIO_AUTO)
4831 			has_audio = intel_dp_detect_audio(connector);
4832 		else
4833 			has_audio = (i == HDMI_AUDIO_ON);
4834 
4835 		if (has_audio == intel_dp->has_audio)
4836 			return 0;
4837 
4838 		intel_dp->has_audio = has_audio;
4839 		goto done;
4840 	}
4841 
4842 	if (property == dev_priv->broadcast_rgb_property) {
4843 		bool old_auto = intel_dp->color_range_auto;
4844 		bool old_range = intel_dp->limited_color_range;
4845 
4846 		switch (val) {
4847 		case INTEL_BROADCAST_RGB_AUTO:
4848 			intel_dp->color_range_auto = true;
4849 			break;
4850 		case INTEL_BROADCAST_RGB_FULL:
4851 			intel_dp->color_range_auto = false;
4852 			intel_dp->limited_color_range = false;
4853 			break;
4854 		case INTEL_BROADCAST_RGB_LIMITED:
4855 			intel_dp->color_range_auto = false;
4856 			intel_dp->limited_color_range = true;
4857 			break;
4858 		default:
4859 			return -EINVAL;
4860 		}
4861 
4862 		if (old_auto == intel_dp->color_range_auto &&
4863 		    old_range == intel_dp->limited_color_range)
4864 			return 0;
4865 
4866 		goto done;
4867 	}
4868 
4869 	if (is_edp(intel_dp) &&
4870 	    property == connector->dev->mode_config.scaling_mode_property) {
4871 		if (val == DRM_MODE_SCALE_NONE) {
4872 			DRM_DEBUG_KMS("no scaling not supported\n");
4873 			return -EINVAL;
4874 		}
4875 
4876 		if (intel_connector->panel.fitting_mode == val) {
4877 			/* the eDP scaling property is not changed */
4878 			return 0;
4879 		}
4880 		intel_connector->panel.fitting_mode = val;
4881 
4882 		goto done;
4883 	}
4884 
4885 	return -EINVAL;
4886 
4887 done:
4888 	if (intel_encoder->base.crtc)
4889 		intel_crtc_restore_mode(intel_encoder->base.crtc);
4890 
4891 	return 0;
4892 }
4893 
4894 static void
4895 intel_dp_connector_destroy(struct drm_connector *connector)
4896 {
4897 	struct intel_connector *intel_connector = to_intel_connector(connector);
4898 
4899 	kfree(intel_connector->detect_edid);
4900 
4901 	if (!IS_ERR_OR_NULL(intel_connector->edid))
4902 		kfree(intel_connector->edid);
4903 
4904 	/* Can't call is_edp() since the encoder may have been destroyed
4905 	 * already. */
4906 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4907 		intel_panel_fini(&intel_connector->panel);
4908 
4909 	drm_connector_cleanup(connector);
4910 	kfree(connector);
4911 }
4912 
4913 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4914 {
4915 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4916 	struct intel_dp *intel_dp = &intel_dig_port->dp;
4917 
4918 	intel_dp_aux_fini(intel_dp);
4919 	intel_dp_mst_encoder_cleanup(intel_dig_port);
4920 	if (is_edp(intel_dp)) {
4921 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4922 		/*
4923 		 * vdd might still be enabled due to the delayed vdd off.
4924 		 * Make sure vdd is actually turned off here.
4925 		 */
4926 		pps_lock(intel_dp);
4927 		edp_panel_vdd_off_sync(intel_dp);
4928 		pps_unlock(intel_dp);
4929 
4930 #if 0
4931 		if (intel_dp->edp_notifier.notifier_call) {
4932 			unregister_reboot_notifier(&intel_dp->edp_notifier);
4933 			intel_dp->edp_notifier.notifier_call = NULL;
4934 		}
4935 #endif
4936 	}
4937 	drm_encoder_cleanup(encoder);
4938 	kfree(intel_dig_port);
4939 }
4940 
4941 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4942 {
4943 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4944 
4945 	if (!is_edp(intel_dp))
4946 		return;
4947 
4948 	/*
4949 	 * vdd might still be enabled due to the delayed vdd off.
4950 	 * Make sure vdd is actually turned off here.
4951 	 */
4952 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4953 	pps_lock(intel_dp);
4954 	edp_panel_vdd_off_sync(intel_dp);
4955 	pps_unlock(intel_dp);
4956 }
4957 
4958 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4959 {
4960 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4961 	struct drm_device *dev = intel_dig_port->base.base.dev;
4962 	struct drm_i915_private *dev_priv = dev->dev_private;
4963 	enum intel_display_power_domain power_domain;
4964 
4965 	lockdep_assert_held(&dev_priv->pps_mutex);
4966 
4967 	if (!edp_have_panel_vdd(intel_dp))
4968 		return;
4969 
4970 	/*
4971 	 * The VDD bit needs a power domain reference, so if the bit is
4972 	 * already enabled when we boot or resume, grab this reference and
4973 	 * schedule a vdd off, so we don't hold on to the reference
4974 	 * indefinitely.
4975 	 */
4976 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4977 	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
4978 	intel_display_power_get(dev_priv, power_domain);
4979 
4980 	edp_panel_vdd_schedule_off(intel_dp);
4981 }
4982 
4983 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4984 {
4985 	struct intel_dp *intel_dp;
4986 
4987 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4988 		return;
4989 
4990 	intel_dp = enc_to_intel_dp(encoder);
4991 
4992 	pps_lock(intel_dp);
4993 
4994 	/*
4995 	 * Read out the current power sequencer assignment,
4996 	 * in case the BIOS did something with it.
4997 	 */
4998 	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
4999 		vlv_initial_power_sequencer_setup(intel_dp);
5000 
5001 	intel_edp_panel_vdd_sanitize(intel_dp);
5002 
5003 	pps_unlock(intel_dp);
5004 }
5005 
5006 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5007 	.dpms = drm_atomic_helper_connector_dpms,
5008 	.detect = intel_dp_detect,
5009 	.force = intel_dp_force,
5010 	.fill_modes = drm_helper_probe_single_connector_modes,
5011 	.set_property = intel_dp_set_property,
5012 	.atomic_get_property = intel_connector_atomic_get_property,
5013 	.destroy = intel_dp_connector_destroy,
5014 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5015 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5016 };
5017 
5018 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5019 	.get_modes = intel_dp_get_modes,
5020 	.mode_valid = intel_dp_mode_valid,
5021 	.best_encoder = intel_best_encoder,
5022 };
5023 
5024 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5025 	.reset = intel_dp_encoder_reset,
5026 	.destroy = intel_dp_encoder_destroy,
5027 };
5028 
5029 bool
5030 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5031 {
5032 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5033 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5034 	struct drm_device *dev = intel_dig_port->base.base.dev;
5035 	struct drm_i915_private *dev_priv = dev->dev_private;
5036 	enum intel_display_power_domain power_domain;
5037 	bool ret = true;
5038 
5039 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5040 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5041 
5042 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5043 		/*
5044 		 * vdd off can generate a long pulse on eDP which
5045 		 * would require vdd on to handle it, and thus we
5046 		 * would end up in an endless cycle of
5047 		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5048 		 */
5049 		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5050 			      port_name(intel_dig_port->port));
5051 		return false;
5052 	}
5053 
5054 	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5055 		      port_name(intel_dig_port->port),
5056 		      long_hpd ? "long" : "short");
5057 
5058 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
5059 	intel_display_power_get(dev_priv, power_domain);
5060 
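	/*
	 * A long pulse suggests the sink was (un)plugged: re-read the
	 * DPCD and re-probe MST. A short pulse is just an IRQ from a
	 * still-connected sink, so only the link/MST status is checked.
	 */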
5061 	if (long_hpd) {
5062 		/* indicate that we need to restart link training */
5063 		intel_dp->train_set_valid = false;
5064 
5065 		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5066 			goto mst_fail;
5067 
5068 		if (!intel_dp_get_dpcd(intel_dp)) {
5069 			goto mst_fail;
5070 		}
5071 
5072 		intel_dp_probe_oui(intel_dp);
5073 
5074 		if (!intel_dp_probe_mst(intel_dp)) {
5075 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5076 			intel_dp_check_link_status(intel_dp);
5077 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
5078 			goto mst_fail;
5079 		}
5080 	} else {
5081 		if (intel_dp->is_mst) {
5082 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5083 				goto mst_fail;
5084 		}
5085 
5086 		if (!intel_dp->is_mst) {
5087 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5088 			intel_dp_check_link_status(intel_dp);
5089 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
5090 		}
5091 	}
5092 
5093 	ret = false;
5094 
5095 	goto put_power;
5096 mst_fail:
5097 	/* if we were in MST mode, and device is not there get out of MST mode */
5098 	if (intel_dp->is_mst) {
5099 		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5100 		intel_dp->is_mst = false;
5101 		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5102 	}
5103 put_power:
5104 	intel_display_power_put(dev_priv, power_domain);
5105 
5106 	return ret;
5107 }
5108 
5109 /* check the VBT to see whether the eDP is on another port */
5110 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5111 {
5112 	struct drm_i915_private *dev_priv = dev->dev_private;
5113 	union child_device_config *p_child;
5114 	int i;
5115 	static const short port_mapping[] = {
5116 		[PORT_B] = DVO_PORT_DPB,
5117 		[PORT_C] = DVO_PORT_DPC,
5118 		[PORT_D] = DVO_PORT_DPD,
5119 		[PORT_E] = DVO_PORT_DPE,
5120 	};
5121 
5122 	/*
5123 	 * eDP is not supported on g4x, so bail out early just
5124 	 * for a bit extra safety in case the VBT is bonkers.
5125 	 */
5126 	if (INTEL_INFO(dev)->gen < 5)
5127 		return false;
5128 
5129 	if (port == PORT_A)
5130 		return true;
5131 
5132 	if (!dev_priv->vbt.child_dev_num)
5133 		return false;
5134 
5135 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5136 		p_child = dev_priv->vbt.child_dev + i;
5137 
5138 		if (p_child->common.dvo_port == port_mapping[port] &&
5139 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5140 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5141 			return true;
5142 	}
5143 	return false;
5144 }
5145 
5146 void
5147 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5148 {
5149 	struct intel_connector *intel_connector = to_intel_connector(connector);
5150 
5151 	intel_attach_force_audio_property(connector);
5152 	intel_attach_broadcast_rgb_property(connector);
5153 	intel_dp->color_range_auto = true;
5154 
5155 	if (is_edp(intel_dp)) {
5156 		drm_mode_create_scaling_mode_property(connector->dev);
5157 		drm_object_attach_property(
5158 			&connector->base,
5159 			connector->dev->mode_config.scaling_mode_property,
5160 			DRM_MODE_SCALE_ASPECT);
5161 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5162 	}
5163 }
5164 
5165 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5166 {
5167 	intel_dp->last_power_cycle = jiffies;
5168 	intel_dp->last_power_on = jiffies;
5169 	intel_dp->last_backlight_off = jiffies;
5170 }
5171 
5172 static void
5173 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5174 				    struct intel_dp *intel_dp)
5175 {
5176 	struct drm_i915_private *dev_priv = dev->dev_private;
5177 	struct edp_power_seq cur, vbt, spec,
5178 		*final = &intel_dp->pps_delays;
5179 	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5180 	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5181 
5182 	lockdep_assert_held(&dev_priv->pps_mutex);
5183 
5184 	/* already initialized? */
5185 	if (final->t11_t12 != 0)
5186 		return;
5187 
5188 	if (IS_BROXTON(dev)) {
5189 		/*
5190 		 * TODO: BXT has 2 sets of PPS registers.
5191 		 * The correct register for Broxton needs to be identified
5192 		 * using the VBT; hardcoding for now.
5193 		 */
5194 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5195 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5196 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5197 	} else if (HAS_PCH_SPLIT(dev)) {
5198 		pp_ctrl_reg = PCH_PP_CONTROL;
5199 		pp_on_reg = PCH_PP_ON_DELAYS;
5200 		pp_off_reg = PCH_PP_OFF_DELAYS;
5201 		pp_div_reg = PCH_PP_DIVISOR;
5202 	} else {
5203 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5204 
5205 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5206 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5207 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5208 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5209 	}
5210 
5211 	/* Workaround: Need to write PP_CONTROL with the unlock key as
5212 	 * the very first thing. */
5213 	pp_ctl = ironlake_get_pp_control(intel_dp);
5214 
5215 	pp_on = I915_READ(pp_on_reg);
5216 	pp_off = I915_READ(pp_off_reg);
5217 	if (!IS_BROXTON(dev)) {
5218 		I915_WRITE(pp_ctrl_reg, pp_ctl);
5219 		pp_div = I915_READ(pp_div_reg);
5220 	}
5221 
5222 	/* Pull timing values out of registers */
5223 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5224 		PANEL_POWER_UP_DELAY_SHIFT;
5225 
5226 	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5227 		PANEL_LIGHT_ON_DELAY_SHIFT;
5228 
5229 	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5230 		PANEL_LIGHT_OFF_DELAY_SHIFT;
5231 
5232 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5233 		PANEL_POWER_DOWN_DELAY_SHIFT;
5234 
5235 	if (IS_BROXTON(dev)) {
5236 		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5237 			BXT_POWER_CYCLE_DELAY_SHIFT;
5238 		if (tmp > 0)
5239 			cur.t11_t12 = (tmp - 1) * 1000;
5240 		else
5241 			cur.t11_t12 = 0;
5242 	} else {
5243 		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5244 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5245 	}
5246 
5247 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5248 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5249 
5250 	vbt = dev_priv->vbt.edp_pps;
5251 
5252 	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5253 	 * our hw here, which are all in 100usec. */
5254 	spec.t1_t3 = 210 * 10;
5255 	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5256 	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5257 	spec.t10 = 500 * 10;
5258 	/* This one is special: it is actually in units of 100ms, but zero
5259 	 * based in the hw (so we need to add 100 ms). The sw vbt
5260 	 * table multiplies it by 1000 to put it in units of 100usec,
5261 	 * too. */
5262 	spec.t11_t12 = (510 + 100) * 10;
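	/*
	 * A worked example of the unit juggling (illustrative only): the
	 * (510 + 100) ms above becomes (510 + 100) * 10 = 6100 in the hw's
	 * 100usec units, i.e. a 610ms power cycle limit.
	 */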
5263 
5264 	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5265 		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5266 
5267 	/* Use the max of the register settings and vbt. If both are
5268 	 * unset, fall back to the spec limits. */
5269 #define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
5270 				       spec.field : \
5271 				       max(cur.field, vbt.field))
5272 	assign_final(t1_t3);
5273 	assign_final(t8);
5274 	assign_final(t9);
5275 	assign_final(t10);
5276 	assign_final(t11_t12);
5277 #undef assign_final
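	/*
	 * Illustrative example of assign_final: with cur.t1_t3 == 0 and
	 * vbt.t1_t3 == 0 the spec limit of 2100 is used; with cur.t1_t3 ==
	 * 300 and vbt.t1_t3 == 400 the larger value, 400, wins.
	 */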
5278 
5279 #define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
5280 	intel_dp->panel_power_up_delay = get_delay(t1_t3);
5281 	intel_dp->backlight_on_delay = get_delay(t8);
5282 	intel_dp->backlight_off_delay = get_delay(t9);
5283 	intel_dp->panel_power_down_delay = get_delay(t10);
5284 	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5285 #undef get_delay
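	/*
	 * get_delay() converts the hw's 100usec units to ms, rounding up;
	 * e.g. a final->t1_t3 of 2100 yields DIV_ROUND_UP(2100, 10) = 210ms.
	 */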
5286 
5287 	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5288 		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5289 		      intel_dp->panel_power_cycle_delay);
5290 
5291 	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5292 		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5293 }
5294 
5295 static void
5296 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5297 					      struct intel_dp *intel_dp)
5298 {
5299 	struct drm_i915_private *dev_priv = dev->dev_private;
5300 	u32 pp_on, pp_off, pp_div, port_sel = 0;
5301 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5302 	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
5303 	enum port port = dp_to_dig_port(intel_dp)->port;
5304 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
5305 
5306 	lockdep_assert_held(&dev_priv->pps_mutex);
5307 
5308 	if (IS_BROXTON(dev)) {
5309 		/*
5310 		 * TODO: BXT has 2 sets of PPS registers.
5311 		 * The correct register for Broxton needs to be identified
5312 		 * using the VBT; hardcoding for now.
5313 		 */
5314 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5315 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5316 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5317 
5318 	} else if (HAS_PCH_SPLIT(dev)) {
5319 		pp_on_reg = PCH_PP_ON_DELAYS;
5320 		pp_off_reg = PCH_PP_OFF_DELAYS;
5321 		pp_div_reg = PCH_PP_DIVISOR;
5322 	} else {
5323 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5324 
5325 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5326 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5327 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5328 	}
5329 
5330 	/*
5331 	 * And finally store the new values in the power sequencer. The
5332 	 * backlight delays are set to 1 because we do manual waits on them. For
5333 	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5334 	 * we'll end up waiting for the backlight off delay twice: once when we
5335 	 * do the manual sleep, and once when we disable the panel and wait for
5336 	 * the PP_STATUS bit to become zero.
5337 	 */
5338 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5339 		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5340 	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5341 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5342 	/* Compute the divisor for the pp clock, simply match the Bspec
5343 	 * formula. */
5344 	if (IS_BROXTON(dev)) {
5345 		pp_div = I915_READ(pp_ctrl_reg);
5346 		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5347 		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5348 				<< BXT_POWER_CYCLE_DELAY_SHIFT);
5349 	} else {
5350 		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5351 		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5352 				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
5353 	}
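	/*
	 * Worked example for the non-BXT branch, assuming a hypothetical
	 * raw clock value of 125 (div = 125): the reference divider field
	 * becomes (100 * 125) / 2 - 1 = 6249, and a power cycle delay of
	 * seq->t11_t12 = 6100 (100usec units) is stored as
	 * DIV_ROUND_UP(6100, 1000) = 7, i.e. in 100ms granularity.
	 */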
5354 
5355 	/* Haswell doesn't have any port selection bits for the panel
5356 	 * power sequencer any more. */
5357 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5358 		port_sel = PANEL_PORT_SELECT_VLV(port);
5359 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5360 		if (port == PORT_A)
5361 			port_sel = PANEL_PORT_SELECT_DPA;
5362 		else
5363 			port_sel = PANEL_PORT_SELECT_DPD;
5364 	}
5365 
5366 	pp_on |= port_sel;
5367 
5368 	I915_WRITE(pp_on_reg, pp_on);
5369 	I915_WRITE(pp_off_reg, pp_off);
5370 	if (IS_BROXTON(dev))
5371 		I915_WRITE(pp_ctrl_reg, pp_div);
5372 	else
5373 		I915_WRITE(pp_div_reg, pp_div);
5374 
5375 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5376 		      I915_READ(pp_on_reg),
5377 		      I915_READ(pp_off_reg),
5378 		      IS_BROXTON(dev) ?
5379 		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5380 		      I915_READ(pp_div_reg));
5381 }
5382 
5383 /**
5384  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5385  * @dev: DRM device
5386  * @refresh_rate: RR to be programmed
5387  *
5388  * This function gets called when refresh rate (RR) has to be changed from
5389  * one frequency to another. Switches can be between high and low RR
5390  * supported by the panel or to any other RR based on media playback (in
5391  * this case, RR value needs to be passed from user space).
5392  *
5393  * The caller of this function needs to take a lock on dev_priv->drrs.
5394  */
5395 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5396 {
5397 	struct drm_i915_private *dev_priv = dev->dev_private;
5398 	struct intel_encoder *encoder;
5399 	struct intel_digital_port *dig_port = NULL;
5400 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5401 	struct intel_crtc_state *config = NULL;
5402 	struct intel_crtc *intel_crtc = NULL;
5403 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5404 
5405 	if (refresh_rate <= 0) {
5406 		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5407 		return;
5408 	}
5409 
5410 	if (intel_dp == NULL) {
5411 		DRM_DEBUG_KMS("DRRS not supported.\n");
5412 		return;
5413 	}
5414 
5415 	/*
5416 	 * FIXME: This needs proper synchronization with psr state for some
5417 	 * platforms that cannot have PSR and DRRS enabled at the same time.
5418 	 */
5419 
5420 	dig_port = dp_to_dig_port(intel_dp);
5421 	encoder = &dig_port->base;
5422 	intel_crtc = to_intel_crtc(encoder->base.crtc);
5423 
5424 	if (!intel_crtc) {
5425 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5426 		return;
5427 	}
5428 
5429 	config = intel_crtc->config;
5430 
5431 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5432 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5433 		return;
5434 	}
5435 
5436 	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5437 			refresh_rate)
5438 		index = DRRS_LOW_RR;
5439 
5440 	if (index == dev_priv->drrs.refresh_rate_type) {
5441 		DRM_DEBUG_KMS(
5442 			"DRRS requested for previously set RR...ignoring\n");
5443 		return;
5444 	}
5445 
5446 	if (!intel_crtc->active) {
5447 		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5448 		return;
5449 	}
5450 
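	/*
	 * Two switching mechanisms are used below: gen8+ (except CHV)
	 * reprograms the link M/N values (M1_N1 vs M2_N2), while gen7
	 * toggles the PIPECONF eDP RR mode switch bit instead.
	 */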
5451 	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5452 		switch (index) {
5453 		case DRRS_HIGH_RR:
5454 			intel_dp_set_m_n(intel_crtc, M1_N1);
5455 			break;
5456 		case DRRS_LOW_RR:
5457 			intel_dp_set_m_n(intel_crtc, M2_N2);
5458 			break;
5459 		case DRRS_MAX_RR:
5460 		default:
5461 			DRM_ERROR("Unsupported refresh rate type\n");
5462 		}
5463 	} else if (INTEL_INFO(dev)->gen > 6) {
5464 		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5465 		u32 val;
5466 
5467 		val = I915_READ(reg);
5468 		if (index > DRRS_HIGH_RR) {
5469 			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5470 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5471 			else
5472 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5473 		} else {
5474 			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5475 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5476 			else
5477 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5478 		}
5479 		I915_WRITE(reg, val);
5480 	}
5481 
5482 	dev_priv->drrs.refresh_rate_type = index;
5483 
5484 	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5485 }
5486 
5487 /**
5488  * intel_edp_drrs_enable - init drrs struct if supported
5489  * @intel_dp: DP struct
5490  *
5491  * Initializes frontbuffer_bits and drrs.dp
5492  */
5493 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5494 {
5495 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5496 	struct drm_i915_private *dev_priv = dev->dev_private;
5497 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5498 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5499 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5500 
5501 	if (!intel_crtc->config->has_drrs) {
5502 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5503 		return;
5504 	}
5505 
5506 	mutex_lock(&dev_priv->drrs.mutex);
5507 	if (WARN_ON(dev_priv->drrs.dp)) {
5508 		DRM_ERROR("DRRS already enabled\n");
5509 		goto unlock;
5510 	}
5511 
5512 	dev_priv->drrs.busy_frontbuffer_bits = 0;
5513 
5514 	dev_priv->drrs.dp = intel_dp;
5515 
5516 unlock:
5517 	mutex_unlock(&dev_priv->drrs.mutex);
5518 }
5519 
5520 /**
5521  * intel_edp_drrs_disable - Disable DRRS
5522  * @intel_dp: DP struct
5523  *
5524  */
5525 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5526 {
5527 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5528 	struct drm_i915_private *dev_priv = dev->dev_private;
5529 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5530 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5531 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5532 
5533 	if (!intel_crtc->config->has_drrs)
5534 		return;
5535 
5536 	mutex_lock(&dev_priv->drrs.mutex);
5537 	if (!dev_priv->drrs.dp) {
5538 		mutex_unlock(&dev_priv->drrs.mutex);
5539 		return;
5540 	}
5541 
5542 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5543 		intel_dp_set_drrs_state(dev_priv->dev,
5544 			intel_dp->attached_connector->panel.
5545 			fixed_mode->vrefresh);
5546 
5547 	dev_priv->drrs.dp = NULL;
5548 	mutex_unlock(&dev_priv->drrs.mutex);
5549 
5550 	cancel_delayed_work_sync(&dev_priv->drrs.work);
5551 }
5552 
5553 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5554 {
5555 	struct drm_i915_private *dev_priv =
5556 		container_of(work, typeof(*dev_priv), drrs.work.work);
5557 	struct intel_dp *intel_dp;
5558 
5559 	mutex_lock(&dev_priv->drrs.mutex);
5560 
5561 	intel_dp = dev_priv->drrs.dp;
5562 
5563 	if (!intel_dp)
5564 		goto unlock;
5565 
5566 	/*
5567 	 * The delayed work can race with an invalidate hence we need to
5568 	 * recheck.
5569 	 */
5570 
5571 	if (dev_priv->drrs.busy_frontbuffer_bits)
5572 		goto unlock;
5573 
5574 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5575 		intel_dp_set_drrs_state(dev_priv->dev,
5576 			intel_dp->attached_connector->panel.
5577 			downclock_mode->vrefresh);
5578 
5579 unlock:
5580 	mutex_unlock(&dev_priv->drrs.mutex);
5581 }
5582 
5583 /**
5584  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5585  * @dev: DRM device
5586  * @frontbuffer_bits: frontbuffer plane tracking bits
5587  *
5588  * This function gets called every time rendering on the given planes starts.
5589  * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
5590  *
5591  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5592  */
5593 void intel_edp_drrs_invalidate(struct drm_device *dev,
5594 		unsigned frontbuffer_bits)
5595 {
5596 	struct drm_i915_private *dev_priv = dev->dev_private;
5597 	struct drm_crtc *crtc;
5598 	enum i915_pipe pipe;
5599 
5600 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5601 		return;
5602 
5603 	cancel_delayed_work(&dev_priv->drrs.work);
5604 
5605 	mutex_lock(&dev_priv->drrs.mutex);
5606 	if (!dev_priv->drrs.dp) {
5607 		mutex_unlock(&dev_priv->drrs.mutex);
5608 		return;
5609 	}
5610 
5611 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5612 	pipe = to_intel_crtc(crtc)->pipe;
5613 
5614 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5615 	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5616 
5617 	/* invalidate means busy screen hence upclock */
5618 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5619 		intel_dp_set_drrs_state(dev_priv->dev,
5620 				dev_priv->drrs.dp->attached_connector->panel.
5621 				fixed_mode->vrefresh);
5622 
5623 	mutex_unlock(&dev_priv->drrs.mutex);
5624 }
5625 
5626 /**
5627  * intel_edp_drrs_flush - Restart Idleness DRRS
5628  * @dev: DRM device
5629  * @frontbuffer_bits: frontbuffer plane tracking bits
5630  *
5631  * This function gets called every time rendering on the given planes has
5632  * completed or a flip on a crtc is completed. So DRRS should be upclocked
5633  * (LOW_RR -> HIGH_RR), and idleness detection should be restarted
5634  * if no other planes are dirty.
5635  *
5636  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5637  */
5638 void intel_edp_drrs_flush(struct drm_device *dev,
5639 		unsigned frontbuffer_bits)
5640 {
5641 	struct drm_i915_private *dev_priv = dev->dev_private;
5642 	struct drm_crtc *crtc;
5643 	enum i915_pipe pipe;
5644 
5645 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5646 		return;
5647 
5648 	cancel_delayed_work(&dev_priv->drrs.work);
5649 
5650 	mutex_lock(&dev_priv->drrs.mutex);
5651 	if (!dev_priv->drrs.dp) {
5652 		mutex_unlock(&dev_priv->drrs.mutex);
5653 		return;
5654 	}
5655 
5656 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5657 	pipe = to_intel_crtc(crtc)->pipe;
5658 
5659 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5660 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5661 
5662 	/* flush means busy screen hence upclock */
5663 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5664 		intel_dp_set_drrs_state(dev_priv->dev,
5665 				dev_priv->drrs.dp->attached_connector->panel.
5666 				fixed_mode->vrefresh);
5667 
5668 	/*
5669 	 * flush also means no more activity hence schedule downclock, if all
5670 	 * other fbs are quiescent too
5671 	 */
5672 	if (!dev_priv->drrs.busy_frontbuffer_bits)
5673 		schedule_delayed_work(&dev_priv->drrs.work,
5674 				msecs_to_jiffies(1000));
5675 	mutex_unlock(&dev_priv->drrs.mutex);
5676 }
5677 
5678 /**
5679  * DOC: Display Refresh Rate Switching (DRRS)
5680  *
5681  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5682  * which enables switching dynamically between low and high refresh
5683  * rates based on the usage scenario. This feature is applicable
5684  * for internal panels.
5685  *
5686  * Indication that the panel supports DRRS is given by the panel EDID, which
5687  * would list multiple refresh rates for one resolution.
5688  *
5689  * DRRS is of 2 types - static and seamless.
5690  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5691  * (which may appear as a blink on screen) and is used in the dock-undock scenario.
5692  * Seamless DRRS involves changing RR without any visual effect to the user
5693  * and can be used during normal system usage. This is done by programming
5694  * certain registers.
5695  *
5696  * Support for static/seamless DRRS may be indicated in the VBT based on
5697  * inputs from the panel spec.
5698  *
5699  * DRRS saves power by switching to low RR based on usage scenarios.
5700  *
5701  * eDP DRRS:-
5702  *        The implementation is based on frontbuffer tracking.
5703  * When there is a disturbance on the screen triggered by user activity or a
5704  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5705  * When there is no movement on screen, after a timeout of 1 second, a switch
5706  * to low RR is made.
5707  *        For integration with frontbuffer tracking code,
5708  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5709  *
5710  * DRRS can be further extended to support other internal panels and also
5711  * the scenario of video playback wherein RR is set based on the rate
5712  * requested by userspace.
5713  */
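/*
 * Illustrative call sequence for the frontbuffer integration described
 * above (a sketch, not additional driver code): on render start the
 * tracking code calls intel_edp_drrs_invalidate(), which upclocks to
 * HIGH_RR and marks the affected bits busy; on completion it calls
 * intel_edp_drrs_flush(), which upclocks again and, once no busy bits
 * remain, schedules the downclock work to drop to LOW_RR after the
 * 1 second timeout.
 */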
5714 
5715 /**
5716  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5717  * @intel_connector: eDP connector
5718  * @fixed_mode: preferred mode of panel
5719  *
5720  * This function is called only once at driver load to initialize basic
5721  * DRRS stuff.
5722  *
5723  * Returns:
5724  * Downclock mode if panel supports it, else return NULL.
5725  * DRRS support is determined by the presence of downclock mode (apart
5726  * from VBT setting).
5727  */
5728 static struct drm_display_mode *
5729 intel_dp_drrs_init(struct intel_connector *intel_connector,
5730 		struct drm_display_mode *fixed_mode)
5731 {
5732 	struct drm_connector *connector = &intel_connector->base;
5733 	struct drm_device *dev = connector->dev;
5734 	struct drm_i915_private *dev_priv = dev->dev_private;
5735 	struct drm_display_mode *downclock_mode = NULL;
5736 
5737 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5738 	lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5739 
5740 	if (INTEL_INFO(dev)->gen <= 6) {
5741 		DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
5742 		return NULL;
5743 	}
5744 
5745 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5746 		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5747 		return NULL;
5748 	}
5749 
5750 	downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
5751 						    connector);
5752 
5753 	if (!downclock_mode) {
5754 		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5755 		return NULL;
5756 	}
5757 
5758 	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5759 
5760 	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5761 	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5762 	return downclock_mode;
5763 }
5764 
5765 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5766 				     struct intel_connector *intel_connector)
5767 {
5768 	struct drm_connector *connector = &intel_connector->base;
5769 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5770 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5771 	struct drm_device *dev = intel_encoder->base.dev;
5772 	struct drm_i915_private *dev_priv = dev->dev_private;
5773 	struct drm_display_mode *fixed_mode = NULL;
5774 	struct drm_display_mode *downclock_mode = NULL;
5775 	bool has_dpcd;
5776 	struct drm_display_mode *scan;
5777 	struct edid *edid;
5778 	enum i915_pipe pipe = INVALID_PIPE;
5779 
5780 	if (!is_edp(intel_dp))
5781 		return true;
5782 
5783 	pps_lock(intel_dp);
5784 	intel_edp_panel_vdd_sanitize(intel_dp);
5785 	pps_unlock(intel_dp);
5786 
5787 	/* Cache DPCD and EDID for edp. */
5788 	has_dpcd = intel_dp_get_dpcd(intel_dp);
5789 
5790 	if (has_dpcd) {
5791 		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5792 			dev_priv->no_aux_handshake =
5793 				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5794 				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5795 	} else {
5796 		/* if this fails, presume the device is a ghost */
5797 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
5798 		return false;
5799 	}
5800 
5801 	/* We now know it's not a ghost, init power sequence regs. */
5802 	pps_lock(intel_dp);
5803 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5804 	pps_unlock(intel_dp);
5805 
5806 	mutex_lock(&dev->mode_config.mutex);
5807 	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5808 	if (edid) {
5809 		if (drm_add_edid_modes(connector, edid)) {
5810 			drm_mode_connector_update_edid_property(connector,
5811 								edid);
5812 			drm_edid_to_eld(connector, edid);
5813 		} else {
5814 			kfree(edid);
5815 			edid = ERR_PTR(-EINVAL);
5816 		}
5817 	} else {
5818 		edid = ERR_PTR(-ENOENT);
5819 	}
5820 	intel_connector->edid = edid;
5821 
5822 	/* prefer fixed mode from EDID if available */
5823 	list_for_each_entry(scan, &connector->probed_modes, head) {
5824 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5825 			fixed_mode = drm_mode_duplicate(dev, scan);
5826 			downclock_mode = intel_dp_drrs_init(
5827 						intel_connector, fixed_mode);
5828 			break;
5829 		}
5830 	}
5831 
5832 	/* fallback to VBT if available for eDP */
5833 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5834 		fixed_mode = drm_mode_duplicate(dev,
5835 					dev_priv->vbt.lfp_lvds_vbt_mode);
5836 		if (fixed_mode)
5837 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5838 	}
5839 	mutex_unlock(&dev->mode_config.mutex);
5840 
5841 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5842 		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5843 #if 0
5844 		register_reboot_notifier(&intel_dp->edp_notifier);
5845 #endif
5846 
5847 		/*
5848 		 * Figure out the current pipe for the initial backlight setup.
5849 		 * If the current pipe isn't valid, try the PPS pipe, and if that
5850 		 * fails just assume pipe A.
5851 		 */
5852 		if (IS_CHERRYVIEW(dev))
5853 			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5854 		else
5855 			pipe = PORT_TO_PIPE(intel_dp->DP);
5856 
5857 		if (pipe != PIPE_A && pipe != PIPE_B)
5858 			pipe = intel_dp->pps_pipe;
5859 
5860 		if (pipe != PIPE_A && pipe != PIPE_B)
5861 			pipe = PIPE_A;
5862 
5863 		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5864 			      pipe_name(pipe));
5865 	}
5866 
5867 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5868 	intel_connector->panel.backlight.power = intel_edp_backlight_power;
5869 	intel_panel_setup_backlight(connector, pipe);
5870 
5871 	return true;
5872 }
5873 
5874 bool
5875 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5876 			struct intel_connector *intel_connector)
5877 {
5878 	struct drm_connector *connector = &intel_connector->base;
5879 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5880 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5881 	struct drm_device *dev = intel_encoder->base.dev;
5882 	struct drm_i915_private *dev_priv = dev->dev_private;
5883 	enum port port = intel_dig_port->port;
5884 	int type, ret;
5885 
5886 	intel_dp->pps_pipe = INVALID_PIPE;
5887 
5888 	/* intel_dp vfuncs */
5889 	if (INTEL_INFO(dev)->gen >= 9)
5890 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5891 	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5892 		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5893 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5894 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5895 	else if (HAS_PCH_SPLIT(dev))
5896 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5897 	else
5898 		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5899 
5900 	if (INTEL_INFO(dev)->gen >= 9)
5901 		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5902 	else
5903 		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5904 
5905 	if (HAS_DDI(dev))
5906 		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5907 
5908 	/* Preserve the current hw state. */
5909 	intel_dp->DP = I915_READ(intel_dp->output_reg);
5910 	intel_dp->attached_connector = intel_connector;
5911 
5912 	if (intel_dp_is_edp(dev, port))
5913 		type = DRM_MODE_CONNECTOR_eDP;
5914 	else
5915 		type = DRM_MODE_CONNECTOR_DisplayPort;
5916 
5917 	/*
5918 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5919 	 * for DP the encoder type can be set by the caller to
5920 	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5921 	 */
5922 	if (type == DRM_MODE_CONNECTOR_eDP)
5923 		intel_encoder->type = INTEL_OUTPUT_EDP;
5924 
5925 	/* eDP only on port B and/or C on vlv/chv */
5926 	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5927 		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
5928 		return false;
5929 
5930 	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5931 			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5932 			port_name(port));
5933 
5934 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5935 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5936 
5937 	connector->interlace_allowed = true;
5938 	connector->doublescan_allowed = 0;
5939 
5940 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5941 			  edp_panel_vdd_work);
5942 
5943 	intel_connector_attach_encoder(intel_connector, intel_encoder);
5944 	drm_connector_register(connector);
5945 
5946 	if (HAS_DDI(dev))
5947 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5948 	else
5949 		intel_connector->get_hw_state = intel_connector_get_hw_state;
5950 	intel_connector->unregister = intel_dp_connector_unregister;
5951 
5952 	/* Set up the hotplug pin. */
5953 	switch (port) {
5954 	case PORT_A:
5955 		intel_encoder->hpd_pin = HPD_PORT_A;
5956 		break;
5957 	case PORT_B:
5958 		intel_encoder->hpd_pin = HPD_PORT_B;
5959 		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
5960 			intel_encoder->hpd_pin = HPD_PORT_A;
5961 		break;
5962 	case PORT_C:
5963 		intel_encoder->hpd_pin = HPD_PORT_C;
5964 		break;
5965 	case PORT_D:
5966 		intel_encoder->hpd_pin = HPD_PORT_D;
5967 		break;
5968 	case PORT_E:
5969 		intel_encoder->hpd_pin = HPD_PORT_E;
5970 		break;
5971 	default:
5972 		BUG();
5973 	}
5974 
5975 	if (is_edp(intel_dp)) {
5976 		pps_lock(intel_dp);
5977 		intel_dp_init_panel_power_timestamps(intel_dp);
5978 		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5979 			vlv_initial_power_sequencer_setup(intel_dp);
5980 		else
5981 			intel_dp_init_panel_power_sequencer(dev, intel_dp);
5982 		pps_unlock(intel_dp);
5983 	}
5984 
5985 	ret = intel_dp_aux_init(intel_dp, intel_connector);
5986 	if (ret)
5987 		goto fail;
5988 
5989 	/* init MST on ports that can support it */
5990 	if (HAS_DP_MST(dev) &&
5991 	    (port == PORT_B || port == PORT_C || port == PORT_D))
5992 		intel_dp_mst_encoder_init(intel_dig_port,
5993 					  intel_connector->base.base.id);
5994 
5995 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5996 		intel_dp_aux_fini(intel_dp);
5997 		intel_dp_mst_encoder_cleanup(intel_dig_port);
5998 		goto fail;
5999 	}
6000 
6001 	intel_dp_add_properties(intel_dp, connector);
6002 
6003 	/* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
6004 	 * written with 0xd.  Failure to do so will result in spurious
6005 	 * interrupts being generated on the port when a cable is not attached.
6006 	 */
6007 	if (IS_G4X(dev) && !IS_GM45(dev)) {
6008 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6009 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6010 	}
6011 
6012 	i915_debugfs_connector_add(connector);
6013 
6014 	return true;
6015 
6016 fail:
6017 	if (is_edp(intel_dp)) {
6018 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6019 		/*
6020 		 * vdd might still be enabled due to the delayed vdd off.
6021 		 * Make sure vdd is actually turned off here.
6022 		 */
6023 		pps_lock(intel_dp);
6024 		edp_panel_vdd_off_sync(intel_dp);
6025 		pps_unlock(intel_dp);
6026 	}
6027 	drm_connector_unregister(connector);
6028 	drm_connector_cleanup(connector);
6029 
6030 	return false;
6031 }
6032 
6033 void
6034 intel_dp_init(struct drm_device *dev,
6035 	      i915_reg_t output_reg, enum port port)
6036 {
6037 	struct drm_i915_private *dev_priv = dev->dev_private;
6038 	struct intel_digital_port *intel_dig_port;
6039 	struct intel_encoder *intel_encoder;
6040 	struct drm_encoder *encoder;
6041 	struct intel_connector *intel_connector;
6042 
6043 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6044 	if (!intel_dig_port)
6045 		return;
6046 
6047 	intel_connector = intel_connector_alloc();
6048 	if (!intel_connector)
6049 		goto err_connector_alloc;
6050 
6051 	intel_encoder = &intel_dig_port->base;
6052 	encoder = &intel_encoder->base;
6053 
6054 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6055 			 DRM_MODE_ENCODER_TMDS, NULL);
6056 
6057 	intel_encoder->compute_config = intel_dp_compute_config;
6058 	intel_encoder->disable = intel_disable_dp;
6059 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
6060 	intel_encoder->get_config = intel_dp_get_config;
6061 	intel_encoder->suspend = intel_dp_encoder_suspend;
6062 	if (IS_CHERRYVIEW(dev)) {
6063 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6064 		intel_encoder->pre_enable = chv_pre_enable_dp;
6065 		intel_encoder->enable = vlv_enable_dp;
6066 		intel_encoder->post_disable = chv_post_disable_dp;
6067 		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6068 	} else if (IS_VALLEYVIEW(dev)) {
6069 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6070 		intel_encoder->pre_enable = vlv_pre_enable_dp;
6071 		intel_encoder->enable = vlv_enable_dp;
6072 		intel_encoder->post_disable = vlv_post_disable_dp;
6073 	} else {
6074 		intel_encoder->pre_enable = g4x_pre_enable_dp;
6075 		intel_encoder->enable = g4x_enable_dp;
6076 		if (INTEL_INFO(dev)->gen >= 5)
6077 			intel_encoder->post_disable = ilk_post_disable_dp;
6078 	}
6079 
6080 	intel_dig_port->port = port;
6081 	intel_dig_port->dp.output_reg = output_reg;
6082 
6083 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6084 	if (IS_CHERRYVIEW(dev)) {
6085 		if (port == PORT_D)
6086 			intel_encoder->crtc_mask = 1 << 2;
6087 		else
6088 			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6089 	} else {
6090 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6091 	}
6092 	intel_encoder->cloneable = 0;
6093 
6094 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6095 	dev_priv->hotplug.irq_port[port] = intel_dig_port;
6096 
6097 	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6098 		goto err_init_connector;
6099 
6100 	return;
6101 
6102 err_init_connector:
6103 	drm_encoder_cleanup(encoder);
6104 	kfree(intel_connector);
6105 err_connector_alloc:
6106 	kfree(intel_dig_port);
6107 
6108 	return;
6109 }
6110 
6111 #if 0
6112 void intel_dp_mst_suspend(struct drm_device *dev)
6113 {
6114 	struct drm_i915_private *dev_priv = dev->dev_private;
6115 	int i;
6116 
6117 	/* disable MST */
6118 	for (i = 0; i < I915_MAX_PORTS; i++) {
6119 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6120 		if (!intel_dig_port)
6121 			continue;
6122 
6123 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6124 			if (!intel_dig_port->dp.can_mst)
6125 				continue;
6126 			if (intel_dig_port->dp.is_mst)
6127 				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6128 		}
6129 	}
6130 }
6131 #endif
6132 
6133 void intel_dp_mst_resume(struct drm_device *dev)
6134 {
6135 	struct drm_i915_private *dev_priv = dev->dev_private;
6136 	int i;
6137 
6138 	for (i = 0; i < I915_MAX_PORTS; i++) {
6139 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6140 		if (!intel_dig_port)
6141 			continue;
6142 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6143 			int ret;
6144 
6145 			if (!intel_dig_port->dp.can_mst)
6146 				continue;
6147 
6148 			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6149 			if (ret != 0) {
6150 				intel_dp_check_mst_status(&intel_dig_port->dp);
6151 			}
6152 		}
6153 	}
6154 }
6155