xref: /dragonfly/sys/dev/drm/i915/intel_dp.c (revision 0fe46dc6)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27 
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <linux/notifier.h>
31 #include <drm/drmP.h>
32 #include <linux/slab.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_crtc.h>
35 #include <drm/drm_crtc_helper.h>
36 #include <drm/drm_edid.h>
37 #include "intel_drv.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 
41 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
42 
43 static int disable_aux_irq = 0;
44 TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
45 
46 /* Compliance test status bits */
47 #define INTEL_DP_RESOLUTION_SHIFT_MASK	0
48 #define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 #define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
51 
52 struct dp_link_dpll {
53 	int clock;
54 	struct dpll dpll;
55 };
56 
57 static const struct dp_link_dpll gen4_dpll[] = {
58 	{ 162000,
59 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
60 	{ 270000,
61 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
62 };
63 
64 static const struct dp_link_dpll pch_dpll[] = {
65 	{ 162000,
66 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
67 	{ 270000,
68 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
69 };
70 
71 static const struct dp_link_dpll vlv_dpll[] = {
72 	{ 162000,
73 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
74 	{ 270000,
75 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
76 };
77 
78 /*
79  * CHV supports eDP 1.4, which has more link rates.
80  * Below we only provide the fixed rates and exclude the variable rates.
81  */
82 static const struct dp_link_dpll chv_dpll[] = {
83 	/*
84 	 * CHV requires programming the fractional division for m2.
85 	 * m2 is stored in fixed-point format using the formula below:
86 	 * (m2_int << 22) | m2_fraction
87 	 */
88 	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
89 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
90 	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
91 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
92 	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
93 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
94 };
95 
96 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
97 				  324000, 432000, 540000 };
98 static const int skl_rates[] = { 162000, 216000, 270000,
99 				  324000, 432000, 540000 };
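/* 162000/270000/540000 kHz correspond to the standard RBR, HBR and HBR2 link rates */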
100 static const int default_rates[] = { 162000, 270000, 540000 };
101 
102 /**
103  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104  * @intel_dp: DP struct
105  *
106  * If a CPU or PCH DP output is attached to an eDP panel, this function
107  * returns true; otherwise it returns false.
108  */
109 static bool is_edp(struct intel_dp *intel_dp)
110 {
111 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112 
113 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
114 }
115 
116 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
117 {
118 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
119 
120 	return intel_dig_port->base.base.dev;
121 }
122 
123 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
124 {
125 	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
126 }
127 
128 static void intel_dp_link_down(struct intel_dp *intel_dp);
129 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
130 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
131 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
132 static void vlv_steal_power_sequencer(struct drm_device *dev,
133 				      enum i915_pipe pipe);
134 
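/*
 * Mask of the lanes (out of the 4 possible) that are unused at the given
 * lane count, e.g. lane_count == 2 yields 0xc.
 */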
135 static unsigned int intel_dp_unused_lane_mask(int lane_count)
136 {
137 	return ~((1 << lane_count) - 1) & 0xf;
138 }
139 
140 static int
141 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
142 {
143 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
144 
145 	switch (max_link_bw) {
146 	case DP_LINK_BW_1_62:
147 	case DP_LINK_BW_2_7:
148 	case DP_LINK_BW_5_4:
149 		break;
150 	default:
151 		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
152 		     max_link_bw);
153 		max_link_bw = DP_LINK_BW_1_62;
154 		break;
155 	}
156 	return max_link_bw;
157 }
158 
159 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
160 {
161 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
162 	struct drm_device *dev = intel_dig_port->base.base.dev;
163 	u8 source_max, sink_max;
164 
165 	source_max = 4;
166 	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
167 	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
168 		source_max = 2;
169 
170 	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
171 
172 	return min(source_max, sink_max);
173 }
174 
175 /*
176  * The units on the numbers in the next two are... bizarre.  Examples will
177  * make it clearer; this one parallels an example in the eDP spec.
178  *
179  * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
180  *
181  *     270000 * 1 * 8 / 10 == 216000
182  *
183  * The actual data capacity of that configuration is 2.16Gbit/s, so the
184  * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
185  * or equivalently, kilopixels per second - so for 1680x1050R it'd be
186  * 119000.  At 18bpp that's 2142000 kilobits per second.
187  *
188  * Thus the strange-looking division by 10 in intel_dp_link_required, to
189  * get the result in decakilobits instead of kilobits.
190  */
191 
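/*
 * Note: the "+ 9" below makes this a ceiling division by 10, so the
 * required link bandwidth is rounded up rather than truncated.
 */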
192 static int
193 intel_dp_link_required(int pixel_clock, int bpp)
194 {
195 	return (pixel_clock * bpp + 9) / 10;
196 }
197 
198 static int
199 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
200 {
201 	return (max_link_clock * max_lanes * 8) / 10;
202 }
203 
204 static enum drm_mode_status
205 intel_dp_mode_valid(struct drm_connector *connector,
206 		    struct drm_display_mode *mode)
207 {
208 	struct intel_dp *intel_dp = intel_attached_dp(connector);
209 	struct intel_connector *intel_connector = to_intel_connector(connector);
210 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
211 	int target_clock = mode->clock;
212 	int max_rate, mode_rate, max_lanes, max_link_clock;
213 
214 	if (is_edp(intel_dp) && fixed_mode) {
215 		if (mode->hdisplay > fixed_mode->hdisplay)
216 			return MODE_PANEL;
217 
218 		if (mode->vdisplay > fixed_mode->vdisplay)
219 			return MODE_PANEL;
220 
221 		target_clock = fixed_mode->clock;
222 	}
223 
224 	max_link_clock = intel_dp_max_link_rate(intel_dp);
225 	max_lanes = intel_dp_max_lane_count(intel_dp);
226 
227 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
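	/* 18 bpp == 6 bpc * 3 channels, the lowest bpp the link could fall back to */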
228 	mode_rate = intel_dp_link_required(target_clock, 18);
229 
230 	if (mode_rate > max_rate)
231 		return MODE_CLOCK_HIGH;
232 
233 	if (mode->clock < 10000)
234 		return MODE_CLOCK_LOW;
235 
236 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
237 		return MODE_H_ILLEGAL;
238 
239 	return MODE_OK;
240 }
241 
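/*
 * The AUX channel data registers hold up to 4 bytes each, packed MSB-first;
 * pack/unpack below convert between byte buffers and that register layout.
 */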
242 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
243 {
244 	int	i;
245 	uint32_t v = 0;
246 
247 	if (src_bytes > 4)
248 		src_bytes = 4;
249 	for (i = 0; i < src_bytes; i++)
250 		v |= ((uint32_t) src[i]) << ((3-i) * 8);
251 	return v;
252 }
253 
254 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
255 {
256 	int i;
257 	if (dst_bytes > 4)
258 		dst_bytes = 4;
259 	for (i = 0; i < dst_bytes; i++)
260 		dst[i] = src >> ((3-i) * 8);
261 }
262 
263 static void
264 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
265 				    struct intel_dp *intel_dp);
266 static void
267 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
268 					      struct intel_dp *intel_dp);
269 
270 static void pps_lock(struct intel_dp *intel_dp)
271 {
272 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
273 	struct intel_encoder *encoder = &intel_dig_port->base;
274 	struct drm_device *dev = encoder->base.dev;
275 	struct drm_i915_private *dev_priv = dev->dev_private;
276 	enum intel_display_power_domain power_domain;
277 
278 	/*
279 	 * See vlv_power_sequencer_reset() for why we need
280 	 * a power domain reference here.
281 	 */
282 	power_domain = intel_display_port_aux_power_domain(encoder);
283 	intel_display_power_get(dev_priv, power_domain);
284 
285 	mutex_lock(&dev_priv->pps_mutex);
286 }
287 
288 static void pps_unlock(struct intel_dp *intel_dp)
289 {
290 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
291 	struct intel_encoder *encoder = &intel_dig_port->base;
292 	struct drm_device *dev = encoder->base.dev;
293 	struct drm_i915_private *dev_priv = dev->dev_private;
294 	enum intel_display_power_domain power_domain;
295 
296 	mutex_unlock(&dev_priv->pps_mutex);
297 
298 	power_domain = intel_display_port_aux_power_domain(encoder);
299 	intel_display_power_put(dev_priv, power_domain);
300 }
301 
302 static void
303 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
304 {
305 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
306 	struct drm_device *dev = intel_dig_port->base.base.dev;
307 	struct drm_i915_private *dev_priv = dev->dev_private;
308 	enum i915_pipe pipe = intel_dp->pps_pipe;
309 	bool pll_enabled, release_cl_override = false;
310 	enum dpio_phy phy = DPIO_PHY(pipe);
311 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
312 	uint32_t DP;
313 
314 	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
315 		 "skipping pipe %c power sequencer kick due to port %c being active\n",
316 		 pipe_name(pipe), port_name(intel_dig_port->port)))
317 		return;
318 
319 	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
320 		      pipe_name(pipe), port_name(intel_dig_port->port));
321 
322 	/* Preserve the BIOS-computed detected bit. This is
323 	 * supposed to be read-only.
324 	 */
325 	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
326 	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
327 	DP |= DP_PORT_WIDTH(1);
328 	DP |= DP_LINK_TRAIN_PAT_1;
329 
330 	if (IS_CHERRYVIEW(dev))
331 		DP |= DP_PIPE_SELECT_CHV(pipe);
332 	else if (pipe == PIPE_B)
333 		DP |= DP_PIPEB_SELECT;
334 
335 	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
336 
337 	/*
338 	 * The DPLL for the pipe must be enabled for this to work.
339 	 * So enable it temporarily if it's not already enabled.
340 	 */
341 	if (!pll_enabled) {
342 		release_cl_override = IS_CHERRYVIEW(dev) &&
343 			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
344 
345 		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
346 				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
347 	}
348 
349 	/*
350 	 * Similar magic to intel_dp_enable_port().
351 	 * We _must_ do this port enable + disable trick
352 	 * to make this power sequencer lock onto the port.
353 	 * Otherwise even the VDD force bit won't work.
354 	 */
355 	I915_WRITE(intel_dp->output_reg, DP);
356 	POSTING_READ(intel_dp->output_reg);
357 
358 	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
359 	POSTING_READ(intel_dp->output_reg);
360 
361 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
362 	POSTING_READ(intel_dp->output_reg);
363 
364 	if (!pll_enabled) {
365 		vlv_force_pll_off(dev, pipe);
366 
367 		if (release_cl_override)
368 			chv_phy_powergate_ch(dev_priv, phy, ch, false);
369 	}
370 }
371 
372 static enum i915_pipe
373 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
374 {
375 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
376 	struct drm_device *dev = intel_dig_port->base.base.dev;
377 	struct drm_i915_private *dev_priv = dev->dev_private;
378 	struct intel_encoder *encoder;
379 	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
380 	enum i915_pipe pipe;
381 
382 	lockdep_assert_held(&dev_priv->pps_mutex);
383 
384 	/* We should never land here with regular DP ports */
385 	WARN_ON(!is_edp(intel_dp));
386 
387 	if (intel_dp->pps_pipe != INVALID_PIPE)
388 		return intel_dp->pps_pipe;
389 
390 	/*
391 	 * We don't have a power sequencer currently.
392 	 * Pick one that's not used by another port.
393 	 */
394 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
395 			    base.head) {
396 		struct intel_dp *tmp;
397 
398 		if (encoder->type != INTEL_OUTPUT_EDP)
399 			continue;
400 
401 		tmp = enc_to_intel_dp(&encoder->base);
402 
403 		if (tmp->pps_pipe != INVALID_PIPE)
404 			pipes &= ~(1 << tmp->pps_pipe);
405 	}
406 
407 	/*
408 	 * Didn't find one. This should not happen since there
409 	 * are two power sequencers and up to two eDP ports.
410 	 */
411 	if (WARN_ON(pipes == 0))
412 		pipe = PIPE_A;
413 	else
414 		pipe = ffs(pipes) - 1;
415 
416 	vlv_steal_power_sequencer(dev, pipe);
417 	intel_dp->pps_pipe = pipe;
418 
419 	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
420 		      pipe_name(intel_dp->pps_pipe),
421 		      port_name(intel_dig_port->port));
422 
423 	/* init power sequencer on this pipe and port */
424 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
425 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
426 
427 	/*
428 	 * Even the VDD force bit doesn't work until we've made
429 	 * the power sequencer lock onto the port.
430 	 */
431 	vlv_power_sequencer_kick(intel_dp);
432 
433 	return intel_dp->pps_pipe;
434 }
435 
436 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
437 			       enum i915_pipe pipe);
438 
439 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
440 			       enum i915_pipe pipe)
441 {
442 	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
443 }
444 
445 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
446 				enum i915_pipe pipe)
447 {
448 	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
449 }
450 
451 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
452 			 enum i915_pipe pipe)
453 {
454 	return true;
455 }
456 
457 static enum i915_pipe
458 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
459 		     enum port port,
460 		     vlv_pipe_check pipe_check)
461 {
462 	enum i915_pipe pipe;
463 
464 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
465 		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
466 			PANEL_PORT_SELECT_MASK;
467 
468 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
469 			continue;
470 
471 		if (!pipe_check(dev_priv, pipe))
472 			continue;
473 
474 		return pipe;
475 	}
476 
477 	return INVALID_PIPE;
478 }
479 
480 static void
481 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
482 {
483 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
484 	struct drm_device *dev = intel_dig_port->base.base.dev;
485 	struct drm_i915_private *dev_priv = dev->dev_private;
486 	enum port port = intel_dig_port->port;
487 
488 	lockdep_assert_held(&dev_priv->pps_mutex);
489 
490 	/* try to find a pipe with this port selected */
491 	/* first pick one where the panel is on */
492 	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
493 						  vlv_pipe_has_pp_on);
494 	/* didn't find one? pick one where vdd is on */
495 	if (intel_dp->pps_pipe == INVALID_PIPE)
496 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
497 							  vlv_pipe_has_vdd_on);
498 	/* didn't find one? pick one with just the correct port */
499 	if (intel_dp->pps_pipe == INVALID_PIPE)
500 		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
501 							  vlv_pipe_any);
502 
503 	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
504 	if (intel_dp->pps_pipe == INVALID_PIPE) {
505 		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
506 			      port_name(port));
507 		return;
508 	}
509 
510 	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
511 		      port_name(port), pipe_name(intel_dp->pps_pipe));
512 
513 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
514 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
515 }
516 
517 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
518 {
519 	struct drm_device *dev = dev_priv->dev;
520 	struct intel_encoder *encoder;
521 
522 	if (WARN_ON(!IS_VALLEYVIEW(dev)))
523 		return;
524 
525 	/*
526 	 * We can't grab pps_mutex here due to deadlock with power_domain
527 	 * mutex when power_domain functions are called while holding pps_mutex.
528 	 * That also means that in order to use pps_pipe the code needs to
529 	 * hold both a power domain reference and pps_mutex, and the power domain
530 	 * reference get/put must be done while _not_ holding pps_mutex.
531 	 * pps_{lock,unlock}() do these steps in the correct order, so they
532 	 * should always be used.
533 	 */
534 
535 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
536 		struct intel_dp *intel_dp;
537 
538 		if (encoder->type != INTEL_OUTPUT_EDP)
539 			continue;
540 
541 		intel_dp = enc_to_intel_dp(&encoder->base);
542 		intel_dp->pps_pipe = INVALID_PIPE;
543 	}
544 }
545 
546 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
547 {
548 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
549 
550 	if (IS_BROXTON(dev))
551 		return BXT_PP_CONTROL(0);
552 	else if (HAS_PCH_SPLIT(dev))
553 		return PCH_PP_CONTROL;
554 	else
555 		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
556 }
557 
558 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
559 {
560 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
561 
562 	if (IS_BROXTON(dev))
563 		return BXT_PP_STATUS(0);
564 	else if (HAS_PCH_SPLIT(dev))
565 		return PCH_PP_STATUS;
566 	else
567 		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
568 }
569 
570 /* Reboot notifier handler to shut down panel power and guarantee T12 timing.
571    This function is only applicable when the panel PM state is not tracked. */
572 #if 0
573 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
574 			      void *unused)
575 {
576 	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
577 						 edp_notifier);
578 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
579 	struct drm_i915_private *dev_priv = dev->dev_private;
580 
581 	if (!is_edp(intel_dp) || code != SYS_RESTART)
582 		return 0;
583 
584 	pps_lock(intel_dp);
585 
586 	if (IS_VALLEYVIEW(dev)) {
587 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
588 		u32 pp_ctrl_reg, pp_div_reg;
589 		u32 pp_div;
590 
591 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
592 		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
593 		pp_div = I915_READ(pp_div_reg);
594 		pp_div &= PP_REFERENCE_DIVIDER_MASK;
595 
596 		/* 0x1F write to PP_DIV_REG sets max cycle delay */
597 		I915_WRITE(pp_div_reg, pp_div | 0x1F);
598 		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
599 		msleep(intel_dp->panel_power_cycle_delay);
600 	}
601 
602 	pps_unlock(intel_dp);
603 
604 	return 0;
605 }
606 #endif
607 
608 static bool edp_have_panel_power(struct intel_dp *intel_dp)
609 {
610 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
611 	struct drm_i915_private *dev_priv = dev->dev_private;
612 
613 	lockdep_assert_held(&dev_priv->pps_mutex);
614 
615 	if (IS_VALLEYVIEW(dev) &&
616 	    intel_dp->pps_pipe == INVALID_PIPE)
617 		return false;
618 
619 	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
620 }
621 
622 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
623 {
624 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
625 	struct drm_i915_private *dev_priv = dev->dev_private;
626 
627 	lockdep_assert_held(&dev_priv->pps_mutex);
628 
629 	if (IS_VALLEYVIEW(dev) &&
630 	    intel_dp->pps_pipe == INVALID_PIPE)
631 		return false;
632 
633 	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
634 }
635 
636 static void
637 intel_dp_check_edp(struct intel_dp *intel_dp)
638 {
639 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
640 	struct drm_i915_private *dev_priv = dev->dev_private;
641 
642 	if (!is_edp(intel_dp))
643 		return;
644 
645 	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
646 		WARN(1, "eDP powered off while attempting aux channel communication.\n");
647 		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
648 			      I915_READ(_pp_stat_reg(intel_dp)),
649 			      I915_READ(_pp_ctrl_reg(intel_dp)));
650 	}
651 }
652 
653 static uint32_t
654 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
655 {
656 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
657 	struct drm_device *dev = intel_dig_port->base.base.dev;
658 	struct drm_i915_private *dev_priv = dev->dev_private;
659 	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
660 	uint32_t status;
661 	bool done;
662 
663 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
664 	if (has_aux_irq)
665 		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
666 					  msecs_to_jiffies_timeout(10));
667 	else
668 		done = wait_for_atomic(C, 10) == 0;
669 	if (!done)
670 		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
671 			  has_aux_irq);
672 #undef C
673 
674 	return status;
675 }
676 
677 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
678 {
679 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
680 	struct drm_device *dev = intel_dig_port->base.base.dev;
681 
682 	/*
683 	 * The clock divider is based on hrawclk and should run at
684 	 * 2MHz. So take the hrawclk value and divide it by 2.
685 	 */
686 	return index ? 0 : intel_hrawclk(dev) / 2;
687 }
688 
689 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
690 {
691 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
692 	struct drm_device *dev = intel_dig_port->base.base.dev;
693 	struct drm_i915_private *dev_priv = dev->dev_private;
694 
695 	if (index)
696 		return 0;
697 
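	/* cdclk_freq is in kHz, so dividing by 2000 targets the ~2MHz AUX clock */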
698 	if (intel_dig_port->port == PORT_A) {
699 		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
700 
701 	} else {
702 		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
703 	}
704 }
705 
706 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
707 {
708 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
709 	struct drm_device *dev = intel_dig_port->base.base.dev;
710 	struct drm_i915_private *dev_priv = dev->dev_private;
711 
712 	if (intel_dig_port->port == PORT_A) {
713 		if (index)
714 			return 0;
715 		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
716 	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
717 		/* Workaround for non-ULT HSW */
718 		switch (index) {
719 		case 0: return 63;
720 		case 1: return 72;
721 		default: return 0;
722 		}
723 	} else {
724 		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
725 	}
726 }
727 
728 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
729 {
730 	return index ? 0 : 100;
731 }
732 
733 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
734 {
735 	/*
736 	 * SKL doesn't need us to program the AUX clock divider (Hardware will
737 	 * derive the clock from CDCLK automatically). We still implement the
738 	 * get_aux_clock_divider vfunc to plug into the existing code.
739 	 */
740 	return index ? 0 : 1;
741 }
742 
743 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
744 				      bool has_aux_irq,
745 				      int send_bytes,
746 				      uint32_t aux_clock_divider)
747 {
748 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
749 	struct drm_device *dev = intel_dig_port->base.base.dev;
750 	uint32_t precharge, timeout;
751 
752 	if (IS_GEN6(dev))
753 		precharge = 3;
754 	else
755 		precharge = 5;
756 
757 	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
758 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
759 	else
760 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
761 
762 	return DP_AUX_CH_CTL_SEND_BUSY |
763 	       DP_AUX_CH_CTL_DONE |
764 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
765 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
766 	       timeout |
767 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
768 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
769 	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
770 	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
771 }
772 
773 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
774 				      bool has_aux_irq,
775 				      int send_bytes,
776 				      uint32_t unused)
777 {
778 	return DP_AUX_CH_CTL_SEND_BUSY |
779 	       DP_AUX_CH_CTL_DONE |
780 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
781 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
782 	       DP_AUX_CH_CTL_TIME_OUT_1600us |
783 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
784 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
785 	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
786 }
787 
788 static int
789 intel_dp_aux_ch(struct intel_dp *intel_dp,
790 		const uint8_t *send, int send_bytes,
791 		uint8_t *recv, int recv_size)
792 {
793 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
794 	struct drm_device *dev = intel_dig_port->base.base.dev;
795 	struct drm_i915_private *dev_priv = dev->dev_private;
796 	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
797 	uint32_t ch_data = ch_ctl + 4;
798 	uint32_t aux_clock_divider;
799 	int i, ret, recv_bytes;
800 	uint32_t status;
801 	int try, clock = 0;
802 #ifdef __DragonFly__
803 	bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
804 #else
805 	bool has_aux_irq = HAS_AUX_IRQ(dev);
806 #endif
807 	bool vdd;
808 
809 	pps_lock(intel_dp);
810 
811 	/*
812 	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
813 	 * In such cases we want to leave VDD enabled, and it's up to the upper
814 	 * layers to turn it off. But for e.g. i2c-dev access we need to turn it on/off
815 	 * ourselves.
816 	 */
817 	vdd = edp_panel_vdd_on(intel_dp);
818 
819 	/* DP AUX is extremely sensitive to IRQ latency, hence request the
820 	 * lowest possible wakeup latency to prevent the CPU from going into
821 	 * deep sleep states.
822 	 */
823 	pm_qos_update_request(&dev_priv->pm_qos, 0);
824 
825 	intel_dp_check_edp(intel_dp);
826 
827 	/* Try to wait for any previous AUX channel activity */
828 	for (try = 0; try < 3; try++) {
829 		status = I915_READ_NOTRACE(ch_ctl);
830 		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
831 			break;
832 		msleep(1);
833 	}
834 
835 	if (try == 3) {
836 		static u32 last_status = -1;
837 		const u32 status = I915_READ(ch_ctl);
838 
839 		if (status != last_status) {
840 			WARN(1, "dp_aux_ch not started status 0x%08x\n",
841 			     status);
842 			last_status = status;
843 		}
844 
845 		ret = -EBUSY;
846 		goto out;
847 	}
848 
849 	/* Only 5 data registers (5 * 4 = 20 bytes)! */
850 	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
851 		ret = -E2BIG;
852 		goto out;
853 	}
854 
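	/* get_aux_clock_divider() returns 0 when it runs out of dividers,
	 * which terminates this retry loop. */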
855 	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
856 		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
857 							  has_aux_irq,
858 							  send_bytes,
859 							  aux_clock_divider);
860 
861 		/* Must try at least 3 times according to DP spec */
862 		for (try = 0; try < 5; try++) {
863 			/* Load the send data into the aux channel data registers */
864 			for (i = 0; i < send_bytes; i += 4)
865 				I915_WRITE(ch_data + i,
866 					   intel_dp_pack_aux(send + i,
867 							     send_bytes - i));
868 
869 			/* Send the command and wait for it to complete */
870 			I915_WRITE(ch_ctl, send_ctl);
871 
872 			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
873 
874 			/* Clear done status and any errors */
875 			I915_WRITE(ch_ctl,
876 				   status |
877 				   DP_AUX_CH_CTL_DONE |
878 				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
879 				   DP_AUX_CH_CTL_RECEIVE_ERROR);
880 
881 			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
882 				continue;
883 
884 			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
885 			 *   400us delay required for errors and timeouts
886 			 *   Timeout errors from the HW already meet this
887 			 *   requirement so skip to next iteration
888 			 */
889 			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
890 				usleep_range(400, 500);
891 				continue;
892 			}
893 			if (status & DP_AUX_CH_CTL_DONE)
894 				goto done;
895 		}
896 	}
897 
898 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
899 		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
900 		ret = -EBUSY;
901 		goto out;
902 	}
903 
904 done:
905 	/* Check for timeout or receive error.
906 	 * Timeouts occur when the sink is not connected
907 	 */
908 	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
909 		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
910 		ret = -EIO;
911 		goto out;
912 	}
913 
914 	/* Timeouts occur when the device isn't connected, so they're
915 	 * "normal" -- don't fill the kernel log with these */
916 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
917 		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
918 		ret = -ETIMEDOUT;
919 		goto out;
920 	}
921 
922 	/* Unload any bytes sent back from the other side */
923 	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
924 		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
925 	if (recv_bytes > recv_size)
926 		recv_bytes = recv_size;
927 
928 	for (i = 0; i < recv_bytes; i += 4)
929 		intel_dp_unpack_aux(I915_READ(ch_data + i),
930 				    recv + i, recv_bytes - i);
931 
932 	ret = recv_bytes;
933 out:
934 	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
935 
936 	if (vdd)
937 		edp_panel_vdd_off(intel_dp, false);
938 
939 	pps_unlock(intel_dp);
940 
941 	return ret;
942 }
943 
944 #define BARE_ADDRESS_SIZE	3
945 #define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
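/*
 * An AUX message header is 3 address bytes (request nibble plus a 20-bit
 * address), optionally followed by a length byte holding size - 1.
 */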
946 static ssize_t
947 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
948 {
949 	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
950 	uint8_t txbuf[20], rxbuf[20];
951 	size_t txsize, rxsize;
952 	int ret;
953 
954 	txbuf[0] = (msg->request << 4) |
955 		((msg->address >> 16) & 0xf);
956 	txbuf[1] = (msg->address >> 8) & 0xff;
957 	txbuf[2] = msg->address & 0xff;
958 	txbuf[3] = msg->size - 1;
959 
960 	switch (msg->request & ~DP_AUX_I2C_MOT) {
961 	case DP_AUX_NATIVE_WRITE:
962 	case DP_AUX_I2C_WRITE:
963 	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
964 		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
965 		rxsize = 2; /* 0 or 1 data bytes */
966 
967 		if (WARN_ON(txsize > 20))
968 			return -E2BIG;
969 
970 		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
971 
972 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
973 		if (ret > 0) {
974 			msg->reply = rxbuf[0] >> 4;
975 
976 			if (ret > 1) {
977 				/* Number of bytes written in a short write. */
978 				ret = clamp_t(int, rxbuf[1], 0, msg->size);
979 			} else {
980 				/* Return payload size. */
981 				ret = msg->size;
982 			}
983 		}
984 		break;
985 
986 	case DP_AUX_NATIVE_READ:
987 	case DP_AUX_I2C_READ:
988 		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
989 		rxsize = msg->size + 1;
990 
991 		if (WARN_ON(rxsize > 20))
992 			return -E2BIG;
993 
994 		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
995 		if (ret > 0) {
996 			msg->reply = rxbuf[0] >> 4;
997 			/*
998 			 * Assume happy day, and copy the data. The caller is
999 			 * expected to check msg->reply before touching it.
1000 			 *
1001 			 * Return payload size.
1002 			 */
1003 			ret--;
1004 			memcpy(msg->buffer, rxbuf + 1, ret);
1005 		}
1006 		break;
1007 
1008 	default:
1009 		ret = -EINVAL;
1010 		break;
1011 	}
1012 
1013 	return ret;
1014 }
1015 
1016 static void
1017 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1018 {
1019 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1020 	struct drm_i915_private *dev_priv = dev->dev_private;
1021 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1022 	enum port port = intel_dig_port->port;
1023 	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1024 	const char *name = NULL;
1025 	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1026 	int ret;
1027 
1028 	/* On SKL we don't have an AUX channel for port E, so we rely on VBT
1029 	 * to set a proper alternate AUX channel.
1030 	 */
1031 	if (IS_SKYLAKE(dev) && port == PORT_E) {
1032 		switch (info->alternate_aux_channel) {
1033 		case DP_AUX_B:
1034 			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1035 			break;
1036 		case DP_AUX_C:
1037 			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1038 			break;
1039 		case DP_AUX_D:
1040 			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1041 			break;
1042 		case DP_AUX_A:
1043 		default:
1044 			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1045 		}
1046 	}
1047 
1048 	switch (port) {
1049 	case PORT_A:
1050 		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1051 		name = "DPDDC-A";
1052 		break;
1053 	case PORT_B:
1054 		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1055 		name = "DPDDC-B";
1056 		break;
1057 	case PORT_C:
1058 		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1059 		name = "DPDDC-C";
1060 		break;
1061 	case PORT_D:
1062 		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1063 		name = "DPDDC-D";
1064 		break;
1065 	case PORT_E:
1066 		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1067 		name = "DPDDC-E";
1068 		break;
1069 	default:
1070 		BUG();
1071 	}
1072 
1073 	/*
1074 	 * The AUX_CTL register is usually DP_CTL + 0x10.
1075 	 *
1076 	 * On Haswell and Broadwell though:
1077 	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1078 	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1079 	 *
1080 	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1081 	 */
1082 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1083 		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1084 
1085 	intel_dp->aux.name = name;
1086 	intel_dp->aux.dev = dev->dev;
1087 	intel_dp->aux.transfer = intel_dp_aux_transfer;
1088 
1089 #if 0
1090 	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1091 		      connector->base.kdev->kobj.name);
1092 #endif
1093 
1094 	ret = drm_dp_aux_register(&intel_dp->aux);
1095 	if (ret < 0) {
1096 		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1097 			  name, ret);
1098 		return;
1099 	}
1100 
1101 #if 0
1102 	ret = sysfs_create_link(&connector->base.kdev->kobj,
1103 				&intel_dp->aux.ddc.dev.kobj,
1104 				intel_dp->aux.ddc.dev.kobj.name);
1105 	if (ret < 0) {
1106 		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1107 		drm_dp_aux_unregister(&intel_dp->aux);
1108 	}
1109 #endif
1110 }
1111 
1112 static void
1113 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1114 {
1115 #if 0
1116 	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1117 
1118 	if (!intel_connector->mst_port)
1119 		sysfs_remove_link(&intel_connector->base.kdev->kobj,
1120 				  intel_dp->aux.ddc.dev.kobj.name);
1121 #endif
1122 	intel_connector_unregister(intel_connector);
1123 }
1124 
1125 static void
1126 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1127 {
1128 	u32 ctrl1;
1129 
1130 	memset(&pipe_config->dpll_hw_state, 0,
1131 	       sizeof(pipe_config->dpll_hw_state));
1132 
1133 	pipe_config->ddi_pll_sel = SKL_DPLL0;
1134 	pipe_config->dpll_hw_state.cfgcr1 = 0;
1135 	pipe_config->dpll_hw_state.cfgcr2 = 0;
1136 
1137 	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1138 	switch (pipe_config->port_clock / 2) {
1139 	case 81000:
1140 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1141 					      SKL_DPLL0);
1142 		break;
1143 	case 135000:
1144 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1145 					      SKL_DPLL0);
1146 		break;
1147 	case 270000:
1148 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1149 					      SKL_DPLL0);
1150 		break;
1151 	case 162000:
1152 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1153 					      SKL_DPLL0);
1154 		break;
1155 	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
1156 	 * results in a CDCLK change. The CDCLK change needs to be handled by
1157 	 * disabling the pipes and re-enabling them. */
1158 	case 108000:
1159 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1160 					      SKL_DPLL0);
1161 		break;
1162 	case 216000:
1163 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1164 					      SKL_DPLL0);
1165 		break;
1166 
1167 	}
1168 	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1169 }
1170 
1171 void
1172 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1173 {
1174 	memset(&pipe_config->dpll_hw_state, 0,
1175 	       sizeof(pipe_config->dpll_hw_state));
1176 
1177 	switch (pipe_config->port_clock / 2) {
1178 	case 81000:
1179 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1180 		break;
1181 	case 135000:
1182 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1183 		break;
1184 	case 270000:
1185 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1186 		break;
1187 	}
1188 }
1189 
1190 static int
1191 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1192 {
1193 	if (intel_dp->num_sink_rates) {
1194 		*sink_rates = intel_dp->sink_rates;
1195 		return intel_dp->num_sink_rates;
1196 	}
1197 
1198 	*sink_rates = default_rates;
1199 
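	/*
	 * The link BW codes (0x06, 0x0a, 0x14) shifted right by 3 give
	 * 0, 1 and 2, so this evaluates to the number of default_rates
	 * entries the sink can use.
	 */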
1200 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1201 }
1202 
1203 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1204 {
1205 	/* WaDisableHBR2:skl */
1206 	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1207 		return false;
1208 
1209 	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1210 	    (INTEL_INFO(dev)->gen >= 9))
1211 		return true;
1212 	else
1213 		return false;
1214 }
1215 
1216 static int
1217 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1218 {
1219 	int size;
1220 
1221 	if (IS_BROXTON(dev)) {
1222 		*source_rates = bxt_rates;
1223 		size = ARRAY_SIZE(bxt_rates);
1224 	} else if (IS_SKYLAKE(dev)) {
1225 		*source_rates = skl_rates;
1226 		size = ARRAY_SIZE(skl_rates);
1227 	} else {
1228 		*source_rates = default_rates;
1229 		size = ARRAY_SIZE(default_rates);
1230 	}
1231 
1232 	/* This depends on the fact that 5.4 is the last value in the array */
1233 	if (!intel_dp_source_supports_hbr2(dev))
1234 		size--;
1235 
1236 	return size;
1237 }
1238 
1239 static void
1240 intel_dp_set_clock(struct intel_encoder *encoder,
1241 		   struct intel_crtc_state *pipe_config)
1242 {
1243 	struct drm_device *dev = encoder->base.dev;
1244 	const struct dp_link_dpll *divisor = NULL;
1245 	int i, count = 0;
1246 
1247 	if (IS_G4X(dev)) {
1248 		divisor = gen4_dpll;
1249 		count = ARRAY_SIZE(gen4_dpll);
1250 	} else if (HAS_PCH_SPLIT(dev)) {
1251 		divisor = pch_dpll;
1252 		count = ARRAY_SIZE(pch_dpll);
1253 	} else if (IS_CHERRYVIEW(dev)) {
1254 		divisor = chv_dpll;
1255 		count = ARRAY_SIZE(chv_dpll);
1256 	} else if (IS_VALLEYVIEW(dev)) {
1257 		divisor = vlv_dpll;
1258 		count = ARRAY_SIZE(vlv_dpll);
1259 	}
1260 
1261 	if (divisor && count) {
1262 		for (i = 0; i < count; i++) {
1263 			if (pipe_config->port_clock == divisor[i].clock) {
1264 				pipe_config->dpll = divisor[i].dpll;
1265 				pipe_config->clock_set = true;
1266 				break;
1267 			}
1268 		}
1269 	}
1270 }
1271 
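/*
 * Both rate arrays are sorted in ascending order, so a single merge-style
 * pass is enough to collect the rates common to source and sink.
 */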
1272 static int intersect_rates(const int *source_rates, int source_len,
1273 			   const int *sink_rates, int sink_len,
1274 			   int *common_rates)
1275 {
1276 	int i = 0, j = 0, k = 0;
1277 
1278 	while (i < source_len && j < sink_len) {
1279 		if (source_rates[i] == sink_rates[j]) {
1280 			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1281 				return k;
1282 			common_rates[k] = source_rates[i];
1283 			++k;
1284 			++i;
1285 			++j;
1286 		} else if (source_rates[i] < sink_rates[j]) {
1287 			++i;
1288 		} else {
1289 			++j;
1290 		}
1291 	}
1292 	return k;
1293 }
1294 
1295 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1296 				 int *common_rates)
1297 {
1298 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1299 	const int *source_rates, *sink_rates;
1300 	int source_len, sink_len;
1301 
1302 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1303 	source_len = intel_dp_source_rates(dev, &source_rates);
1304 
1305 	return intersect_rates(source_rates, source_len,
1306 			       sink_rates, sink_len,
1307 			       common_rates);
1308 }
1309 
1310 static void snprintf_int_array(char *str, size_t len,
1311 			       const int *array, int nelem)
1312 {
1313 	int i;
1314 
1315 	str[0] = '\0';
1316 
1317 	for (i = 0; i < nelem; i++) {
1318 		int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1319 		if (r >= len)
1320 			return;
1321 		str += r;
1322 		len -= r;
1323 	}
1324 }
1325 
1326 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1327 {
1328 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1329 	const int *source_rates, *sink_rates;
1330 	int source_len, sink_len, common_len;
1331 	int common_rates[DP_MAX_SUPPORTED_RATES];
1332 	char str[128]; /* FIXME: too big for stack? */
1333 
1334 	if ((drm_debug & DRM_UT_KMS) == 0)
1335 		return;
1336 
1337 	source_len = intel_dp_source_rates(dev, &source_rates);
1338 	snprintf_int_array(str, sizeof(str), source_rates, source_len);
1339 	DRM_DEBUG_KMS("source rates: %s\n", str);
1340 
1341 	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1342 	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1343 	DRM_DEBUG_KMS("sink rates: %s\n", str);
1344 
1345 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1346 	snprintf_int_array(str, sizeof(str), common_rates, common_len);
1347 	DRM_DEBUG_KMS("common rates: %s\n", str);
1348 }
1349 
1350 static int rate_to_index(int find, const int *rates)
1351 {
1352 	int i = 0;
1353 
1354 	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1355 		if (find == rates[i])
1356 			break;
1357 
1358 	return i;
1359 }
1360 
1361 int
1362 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1363 {
1364 	int rates[DP_MAX_SUPPORTED_RATES] = {};
1365 	int len;
1366 
1367 	len = intel_dp_common_rates(intel_dp, rates);
1368 	if (WARN_ON(len <= 0))
1369 		return 162000;
1370 
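	/*
	 * rates[] was zero-initialized, so rate_to_index(0, rates) returns
	 * the number of valid entries; the entry just before the first zero
	 * is the highest common rate.
	 */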
1371 	return rates[rate_to_index(0, rates) - 1];
1372 }
1373 
1374 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1375 {
1376 	return rate_to_index(rate, intel_dp->sink_rates);
1377 }
1378 
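/*
 * Sinks exposing a link rate table (eDP 1.4) are selected by index via
 * rate_select with link_bw set to 0; everything else uses the classic
 * link BW code.
 */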
1379 static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1380 				  uint8_t *link_bw, uint8_t *rate_select)
1381 {
1382 	if (intel_dp->num_sink_rates) {
1383 		*link_bw = 0;
1384 		*rate_select =
1385 			intel_dp_rate_select(intel_dp, port_clock);
1386 	} else {
1387 		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1388 		*rate_select = 0;
1389 	}
1390 }
1391 
1392 bool
1393 intel_dp_compute_config(struct intel_encoder *encoder,
1394 			struct intel_crtc_state *pipe_config)
1395 {
1396 	struct drm_device *dev = encoder->base.dev;
1397 	struct drm_i915_private *dev_priv = dev->dev_private;
1398 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1399 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1400 	enum port port = dp_to_dig_port(intel_dp)->port;
1401 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1402 	struct intel_connector *intel_connector = intel_dp->attached_connector;
1403 	int lane_count, clock;
1404 	int min_lane_count = 1;
1405 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1406 	/* Conveniently, the link BW constants become indices with a shift...*/
1407 	int min_clock = 0;
1408 	int max_clock;
1409 	int bpp, mode_rate;
1410 	int link_avail, link_clock;
1411 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1412 	int common_len;
1413 	uint8_t link_bw, rate_select;
1414 
1415 	common_len = intel_dp_common_rates(intel_dp, common_rates);
1416 
1417 	/* No common link rates between source and sink */
1418 	WARN_ON(common_len <= 0);
1419 
1420 	max_clock = common_len - 1;
1421 
1422 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1423 		pipe_config->has_pch_encoder = true;
1424 
1425 	pipe_config->has_dp_encoder = true;
1426 	pipe_config->has_drrs = false;
1427 	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1428 
1429 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1430 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1431 				       adjusted_mode);
1432 
1433 		if (INTEL_INFO(dev)->gen >= 9) {
1434 			int ret;
1435 			ret = skl_update_scaler_crtc(pipe_config);
1436 			if (ret)
1437 				return false; /* ret is an errno; this function returns bool */
1438 		}
1439 
1440 		if (!HAS_PCH_SPLIT(dev))
1441 			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1442 						 intel_connector->panel.fitting_mode);
1443 		else
1444 			intel_pch_panel_fitting(intel_crtc, pipe_config,
1445 						intel_connector->panel.fitting_mode);
1446 	}
1447 
1448 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1449 		return false;
1450 
1451 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1452 		      "max bw %d pixel clock %iKHz\n",
1453 		      max_lane_count, common_rates[max_clock],
1454 		      adjusted_mode->crtc_clock);
1455 
1456 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1457 	 * bpc in between. */
1458 	bpp = pipe_config->pipe_bpp;
1459 	if (is_edp(intel_dp)) {
1460 
1461 		/* Get bpp from VBT only for panels that don't have bpp in EDID */
1462 		if (intel_connector->base.display_info.bpc == 0 &&
1463 			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1464 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1465 				      dev_priv->vbt.edp_bpp);
1466 			bpp = dev_priv->vbt.edp_bpp;
1467 		}
1468 
1469 		/*
1470 		 * Use the maximum clock and number of lanes the eDP panel
1471 		 * advertises being capable of. The panels are generally
1472 		 * designed to support only a single clock and lane
1473 		 * configuration, and typically these values correspond to the
1474 		 * native resolution of the panel.
1475 		 */
1476 		min_lane_count = max_lane_count;
1477 		min_clock = max_clock;
1478 	}
1479 
1480 	for (; bpp >= 6*3; bpp -= 2*3) {
1481 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1482 						   bpp);
1483 
1484 		for (clock = min_clock; clock <= max_clock; clock++) {
1485 			for (lane_count = min_lane_count;
1486 				lane_count <= max_lane_count;
1487 				lane_count <<= 1) {
1488 
1489 				link_clock = common_rates[clock];
1490 				link_avail = intel_dp_max_data_rate(link_clock,
1491 								    lane_count);
1492 
1493 				if (mode_rate <= link_avail) {
1494 					goto found;
1495 				}
1496 			}
1497 		}
1498 	}
1499 
1500 	return false;
1501 
1502 found:
1503 	if (intel_dp->color_range_auto) {
1504 		/*
1505 		 * See:
1506 		 * CEA-861-E - 5.1 Default Encoding Parameters
1507 		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1508 		 */
1509 		pipe_config->limited_color_range =
1510 			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1511 	} else {
1512 		pipe_config->limited_color_range =
1513 			intel_dp->limited_color_range;
1514 	}
1515 
1516 	pipe_config->lane_count = lane_count;
1517 
1518 	pipe_config->pipe_bpp = bpp;
1519 	pipe_config->port_clock = common_rates[clock];
1520 
1521 	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1522 			      &link_bw, &rate_select);
1523 
1524 	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1525 		      link_bw, rate_select, pipe_config->lane_count,
1526 		      pipe_config->port_clock, bpp);
1527 	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1528 		      mode_rate, link_avail);
1529 
1530 	intel_link_compute_m_n(bpp, lane_count,
1531 			       adjusted_mode->crtc_clock,
1532 			       pipe_config->port_clock,
1533 			       &pipe_config->dp_m_n);
1534 
1535 	if (intel_connector->panel.downclock_mode != NULL &&
1536 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1537 			pipe_config->has_drrs = true;
1538 			intel_link_compute_m_n(bpp, lane_count,
1539 				intel_connector->panel.downclock_mode->clock,
1540 				pipe_config->port_clock,
1541 				&pipe_config->dp_m2_n2);
1542 	}
1543 
1544 	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1545 		skl_edp_set_pll_config(pipe_config);
1546 	else if (IS_BROXTON(dev))
1547 		/* handled in ddi */;
1548 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1549 		hsw_dp_set_ddi_pll_sel(pipe_config);
1550 	else
1551 		intel_dp_set_clock(encoder, pipe_config);
1552 
1553 	return true;
1554 }
1555 
1556 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1557 {
1558 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1559 	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1560 	struct drm_device *dev = crtc->base.dev;
1561 	struct drm_i915_private *dev_priv = dev->dev_private;
1562 	u32 dpa_ctl;
1563 
1564 	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1565 		      crtc->config->port_clock);
1566 	dpa_ctl = I915_READ(DP_A);
1567 	dpa_ctl &= ~DP_PLL_FREQ_MASK;
1568 
1569 	if (crtc->config->port_clock == 162000) {
1570 		/* For a long time we've carried around an ILK-DevA w/a for the
1571 		 * 160MHz clock. If we're really unlucky, it's still required.
1572 		 */
1573 		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1574 		dpa_ctl |= DP_PLL_FREQ_160MHZ;
1575 		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1576 	} else {
1577 		dpa_ctl |= DP_PLL_FREQ_270MHZ;
1578 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1579 	}
1580 
1581 	I915_WRITE(DP_A, dpa_ctl);
1582 
1583 	POSTING_READ(DP_A);
1584 	udelay(500);
1585 }
1586 
1587 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1588 			      const struct intel_crtc_state *pipe_config)
1589 {
1590 	intel_dp->link_rate = pipe_config->port_clock;
1591 	intel_dp->lane_count = pipe_config->lane_count;
1592 }
1593 
1594 static void intel_dp_prepare(struct intel_encoder *encoder)
1595 {
1596 	struct drm_device *dev = encoder->base.dev;
1597 	struct drm_i915_private *dev_priv = dev->dev_private;
1598 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1599 	enum port port = dp_to_dig_port(intel_dp)->port;
1600 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1601 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1602 
1603 	intel_dp_set_link_params(intel_dp, crtc->config);
1604 
1605 	/*
1606 	 * There are four kinds of DP registers:
1607 	 *
1608 	 *	IBX PCH
1609 	 *	SNB CPU
1610 	 *	IVB CPU
1611 	 *	CPT PCH
1612 	 *
1613 	 * IBX PCH and CPU are the same for almost everything,
1614 	 * except that the CPU DP PLL is configured in this
1615 	 * register
1616 	 *
1617 	 * CPT PCH is quite different, having many bits moved
1618 	 * to the TRANS_DP_CTL register instead. That
1619 	 * configuration happens (oddly) in ironlake_pch_enable
1620 	 */
1621 
1622 	/* Preserve the BIOS-computed detected bit. This is
1623 	 * supposed to be read-only.
1624 	 */
1625 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1626 
1627 	/* Handle DP bits in common between all three register formats */
1628 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1629 	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1630 
1631 	if (crtc->config->has_audio)
1632 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1633 
1634 	/* Split out the IBX/CPU vs CPT settings */
1635 
1636 	if (IS_GEN7(dev) && port == PORT_A) {
1637 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1638 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1639 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1640 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1641 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1642 
1643 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1644 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1645 
1646 		intel_dp->DP |= crtc->pipe << 29;
1647 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1648 		u32 trans_dp;
1649 
1650 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1651 
1652 		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1653 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1654 			trans_dp |= TRANS_DP_ENH_FRAMING;
1655 		else
1656 			trans_dp &= ~TRANS_DP_ENH_FRAMING;
1657 		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1658 	} else {
1659 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1660 		    crtc->config->limited_color_range)
1661 			intel_dp->DP |= DP_COLOR_RANGE_16_235;
1662 
1663 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1664 			intel_dp->DP |= DP_SYNC_HS_HIGH;
1665 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1666 			intel_dp->DP |= DP_SYNC_VS_HIGH;
1667 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
1668 
1669 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1670 			intel_dp->DP |= DP_ENHANCED_FRAMING;
1671 
1672 		if (IS_CHERRYVIEW(dev))
1673 			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1674 		else if (crtc->pipe == PIPE_B)
1675 			intel_dp->DP |= DP_PIPEB_SELECT;
1676 	}
1677 }
1678 
1679 #define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1680 #define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1681 
1682 #define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1683 #define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1684 
1685 #define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1686 #define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1687 
1688 static void wait_panel_status(struct intel_dp *intel_dp,
1689 				       u32 mask,
1690 				       u32 value)
1691 {
1692 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1693 	struct drm_i915_private *dev_priv = dev->dev_private;
1694 	u32 pp_stat_reg, pp_ctrl_reg;
1695 
1696 	lockdep_assert_held(&dev_priv->pps_mutex);
1697 
1698 	pp_stat_reg = _pp_stat_reg(intel_dp);
1699 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1700 
1701 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1702 			mask, value,
1703 			I915_READ(pp_stat_reg),
1704 			I915_READ(pp_ctrl_reg));
1705 
1706 	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1707 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1708 				I915_READ(pp_stat_reg),
1709 				I915_READ(pp_ctrl_reg));
1710 	}
1711 
1712 	DRM_DEBUG_KMS("Wait complete\n");
1713 }
1714 
1715 static void wait_panel_on(struct intel_dp *intel_dp)
1716 {
1717 	DRM_DEBUG_KMS("Wait for panel power on\n");
1718 	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1719 }
1720 
1721 static void wait_panel_off(struct intel_dp *intel_dp)
1722 {
1723 	DRM_DEBUG_KMS("Wait for panel power off time\n");
1724 	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1725 }
1726 
1727 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1728 {
1729 	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1730 
1731 	/* When we disable the VDD override bit last, we have to do the manual
1732 	 * wait. */
1733 	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1734 				       intel_dp->panel_power_cycle_delay);
1735 
1736 	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1737 }
1738 
1739 static void wait_backlight_on(struct intel_dp *intel_dp)
1740 {
1741 	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1742 				       intel_dp->backlight_on_delay);
1743 }
1744 
1745 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1746 {
1747 	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1748 				       intel_dp->backlight_off_delay);
1749 }
1750 
1751 /* Read the current pp_control value, unlocking the register if it
1752  * is locked.
1753  */
1754 
1755 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1756 {
1757 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1758 	struct drm_i915_private *dev_priv = dev->dev_private;
1759 	u32 control;
1760 
1761 	lockdep_assert_held(&dev_priv->pps_mutex);
1762 
1763 	control = I915_READ(_pp_ctrl_reg(intel_dp));
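	/*
	 * On everything but BXT the PP registers are write-protected by an
	 * unlock key in the PP_CONTROL high bits (PANEL_UNLOCK_REGS), so
	 * patch the key in here to make later writes stick.
	 */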
1764 	if (!IS_BROXTON(dev)) {
1765 		control &= ~PANEL_UNLOCK_MASK;
1766 		control |= PANEL_UNLOCK_REGS;
1767 	}
1768 	return control;
1769 }
1770 
1771 /*
1772  * Must be paired with edp_panel_vdd_off().
1773  * Must hold pps_mutex around the whole on/off sequence.
1774  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1775  */
1776 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1777 {
1778 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1779 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1780 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1781 	struct drm_i915_private *dev_priv = dev->dev_private;
1782 	enum intel_display_power_domain power_domain;
1783 	u32 pp;
1784 	u32 pp_stat_reg, pp_ctrl_reg;
1785 	bool need_to_disable = !intel_dp->want_panel_vdd;
1786 
1787 	lockdep_assert_held(&dev_priv->pps_mutex);
1788 
1789 	if (!is_edp(intel_dp))
1790 		return false;
1791 
1792 	cancel_delayed_work(&intel_dp->panel_vdd_work);
1793 	intel_dp->want_panel_vdd = true;
1794 
1795 	if (edp_have_panel_vdd(intel_dp))
1796 		return need_to_disable;
1797 
1798 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1799 	intel_display_power_get(dev_priv, power_domain);
1800 
1801 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1802 		      port_name(intel_dig_port->port));
1803 
1804 	if (!edp_have_panel_power(intel_dp))
1805 		wait_panel_power_cycle(intel_dp);
1806 
1807 	pp = ironlake_get_pp_control(intel_dp);
1808 	pp |= EDP_FORCE_VDD;
1809 
1810 	pp_stat_reg = _pp_stat_reg(intel_dp);
1811 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1812 
1813 	I915_WRITE(pp_ctrl_reg, pp);
1814 	POSTING_READ(pp_ctrl_reg);
1815 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1816 			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1817 	/*
1818 	 * If the panel wasn't on, delay before accessing aux channel
1819 	 */
1820 	if (!edp_have_panel_power(intel_dp)) {
1821 		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1822 			      port_name(intel_dig_port->port));
1823 		msleep(intel_dp->panel_power_up_delay);
1824 	}
1825 
1826 	return need_to_disable;
1827 }
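
/*
 * Minimal usage sketch (hypothetical caller): the return value is true
 * only when VDD wasn't already requested, which intel_edp_panel_vdd_on()
 * below uses to warn about double requests:
 *
 *	vdd = edp_panel_vdd_on(intel_dp);
 *	... AUX traffic with VDD guaranteed up ...
 *	edp_panel_vdd_off(intel_dp, false);
 */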
1828 
1829 /*
1830  * Must be paired with intel_edp_panel_vdd_off() or
1831  * intel_edp_panel_off().
1832  * Nested calls to these functions are not allowed since
1833  * we drop the lock. Caller must use some higher level
1834  * locking to prevent nested calls from other threads.
1835  */
1836 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1837 {
1838 	bool vdd;
1839 
1840 	if (!is_edp(intel_dp))
1841 		return;
1842 
1843 	pps_lock(intel_dp);
1844 	vdd = edp_panel_vdd_on(intel_dp);
1845 	pps_unlock(intel_dp);
1846 
1847 #ifdef __DragonFly__
1848 	/* XXX: limit dmesg spam to 16 warnings instead of 137; where is the bug? */
1849 	if (!vdd)
1850 		DRM_ERROR_RATELIMITED("eDP port %c VDD already requested on\n",
1851 		    port_name(dp_to_dig_port(intel_dp)->port));
1852 #else
1853 	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1854 	     port_name(dp_to_dig_port(intel_dp)->port));
1855 #endif
1856 }
1857 
1858 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1859 {
1860 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1861 	struct drm_i915_private *dev_priv = dev->dev_private;
1862 	struct intel_digital_port *intel_dig_port =
1863 		dp_to_dig_port(intel_dp);
1864 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1865 	enum intel_display_power_domain power_domain;
1866 	u32 pp;
1867 	u32 pp_stat_reg, pp_ctrl_reg;
1868 
1869 	lockdep_assert_held(&dev_priv->pps_mutex);
1870 
1871 	WARN_ON(intel_dp->want_panel_vdd);
1872 
1873 	if (!edp_have_panel_vdd(intel_dp))
1874 		return;
1875 
1876 	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1877 		      port_name(intel_dig_port->port));
1878 
1879 	pp = ironlake_get_pp_control(intel_dp);
1880 	pp &= ~EDP_FORCE_VDD;
1881 
1882 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1883 	pp_stat_reg = _pp_stat_reg(intel_dp);
1884 
1885 	I915_WRITE(pp_ctrl_reg, pp);
1886 	POSTING_READ(pp_ctrl_reg);
1887 
1888 	/* Make sure sequencer is idle before allowing subsequent activity */
1889 	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1890 		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1891 
1892 	if ((pp & POWER_TARGET_ON) == 0)
1893 		intel_dp->last_power_cycle = jiffies;
1894 
1895 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1896 	intel_display_power_put(dev_priv, power_domain);
1897 }
1898 
1899 static void edp_panel_vdd_work(struct work_struct *__work)
1900 {
1901 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1902 						 struct intel_dp, panel_vdd_work);
1903 
1904 	pps_lock(intel_dp);
1905 	if (!intel_dp->want_panel_vdd)
1906 		edp_panel_vdd_off_sync(intel_dp);
1907 	pps_unlock(intel_dp);
1908 }
1909 
1910 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1911 {
1912 	unsigned long delay;
1913 
1914 	/*
1915 	 * Queue the timer to fire a long time from now (relative to the power
1916 	 * down delay) to keep the panel power up across a sequence of
1917 	 * operations.
1918 	 */
1919 	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1920 	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1921 }
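
/*
 * Example: with a (hypothetical) panel_power_cycle_delay of 500 ms the
 * work above fires roughly 2.5 s out, so a burst of VDD users shares one
 * power cycle instead of bouncing VDD for every AUX access.
 */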
1922 
1923 /*
1924  * Must be paired with edp_panel_vdd_on().
1925  * Must hold pps_mutex around the whole on/off sequence.
1926  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1927  */
1928 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1929 {
1930 	struct drm_i915_private *dev_priv =
1931 		intel_dp_to_dev(intel_dp)->dev_private;
1932 
1933 	lockdep_assert_held(&dev_priv->pps_mutex);
1934 
1935 	if (!is_edp(intel_dp))
1936 		return;
1937 
1938 	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1939 	     port_name(dp_to_dig_port(intel_dp)->port));
1940 
1941 	intel_dp->want_panel_vdd = false;
1942 
1943 	if (sync)
1944 		edp_panel_vdd_off_sync(intel_dp);
1945 	else
1946 		edp_panel_vdd_schedule_off(intel_dp);
1947 }
1948 
1949 static void edp_panel_on(struct intel_dp *intel_dp)
1950 {
1951 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1952 	struct drm_i915_private *dev_priv = dev->dev_private;
1953 	u32 pp;
1954 	u32 pp_ctrl_reg;
1955 
1956 	lockdep_assert_held(&dev_priv->pps_mutex);
1957 
1958 	if (!is_edp(intel_dp))
1959 		return;
1960 
1961 	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1962 		      port_name(dp_to_dig_port(intel_dp)->port));
1963 
1964 	if (WARN(edp_have_panel_power(intel_dp),
1965 		 "eDP port %c panel power already on\n",
1966 		 port_name(dp_to_dig_port(intel_dp)->port)))
1967 		return;
1968 
1969 	wait_panel_power_cycle(intel_dp);
1970 
1971 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1972 	pp = ironlake_get_pp_control(intel_dp);
1973 	if (IS_GEN5(dev)) {
1974 		/* ILK workaround: disable reset around power sequence */
1975 		pp &= ~PANEL_POWER_RESET;
1976 		I915_WRITE(pp_ctrl_reg, pp);
1977 		POSTING_READ(pp_ctrl_reg);
1978 	}
1979 
1980 	pp |= POWER_TARGET_ON;
1981 	if (!IS_GEN5(dev))
1982 		pp |= PANEL_POWER_RESET;
1983 
1984 	I915_WRITE(pp_ctrl_reg, pp);
1985 	POSTING_READ(pp_ctrl_reg);
1986 
1987 	wait_panel_on(intel_dp);
1988 	intel_dp->last_power_on = jiffies;
1989 
1990 	if (IS_GEN5(dev)) {
1991 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1992 		I915_WRITE(pp_ctrl_reg, pp);
1993 		POSTING_READ(pp_ctrl_reg);
1994 	}
1995 }
1996 
1997 void intel_edp_panel_on(struct intel_dp *intel_dp)
1998 {
1999 	if (!is_edp(intel_dp))
2000 		return;
2001 
2002 	pps_lock(intel_dp);
2003 	edp_panel_on(intel_dp);
2004 	pps_unlock(intel_dp);
2005 }
2006 
2008 static void edp_panel_off(struct intel_dp *intel_dp)
2009 {
2010 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2011 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
2012 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2013 	struct drm_i915_private *dev_priv = dev->dev_private;
2014 	enum intel_display_power_domain power_domain;
2015 	u32 pp;
2016 	u32 pp_ctrl_reg;
2017 
2018 	lockdep_assert_held(&dev_priv->pps_mutex);
2019 
2020 	if (!is_edp(intel_dp))
2021 		return;
2022 
2023 	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2024 		      port_name(dp_to_dig_port(intel_dp)->port));
2025 
2026 	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2027 	     port_name(dp_to_dig_port(intel_dp)->port));
2028 
2029 	pp = ironlake_get_pp_control(intel_dp);
2030 	/* We need to switch off panel power _and_ force vdd; otherwise some
2031 	 * panels get very unhappy and cease to work. */
2032 	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2033 		EDP_BLC_ENABLE);
2034 
2035 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2036 
2037 	intel_dp->want_panel_vdd = false;
2038 
2039 	I915_WRITE(pp_ctrl_reg, pp);
2040 	POSTING_READ(pp_ctrl_reg);
2041 
2042 	intel_dp->last_power_cycle = jiffies;
2043 	wait_panel_off(intel_dp);
2044 
2045 	/* We got a reference when we enabled the VDD. */
2046 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
2047 	intel_display_power_put(dev_priv, power_domain);
2048 }
2049 
2050 void intel_edp_panel_off(struct intel_dp *intel_dp)
2051 {
2052 	if (!is_edp(intel_dp))
2053 		return;
2054 
2055 	pps_lock(intel_dp);
2056 	edp_panel_off(intel_dp);
2057 	pps_unlock(intel_dp);
2058 }
2059 
2060 /* Enable backlight in the panel power control. */
2061 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2062 {
2063 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2064 	struct drm_device *dev = intel_dig_port->base.base.dev;
2065 	struct drm_i915_private *dev_priv = dev->dev_private;
2066 	u32 pp;
2067 	u32 pp_ctrl_reg;
2068 
2069 	/*
2070 	 * If we enable the backlight right away following a panel power
2071 	 * on, we may see slight flicker as the panel syncs with the eDP
2072 	 * link.  So delay a bit to make sure the image is solid before
2073 	 * allowing it to appear.
2074 	 */
2075 	wait_backlight_on(intel_dp);
2076 
2077 	pps_lock(intel_dp);
2078 
2079 	pp = ironlake_get_pp_control(intel_dp);
2080 	pp |= EDP_BLC_ENABLE;
2081 
2082 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2083 
2084 	I915_WRITE(pp_ctrl_reg, pp);
2085 	POSTING_READ(pp_ctrl_reg);
2086 
2087 	pps_unlock(intel_dp);
2088 }
2089 
2090 /* Enable backlight PWM and backlight PP control. */
2091 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2092 {
2093 	if (!is_edp(intel_dp))
2094 		return;
2095 
2096 	DRM_DEBUG_KMS("\n");
2097 
2098 	intel_panel_enable_backlight(intel_dp->attached_connector);
2099 	_intel_edp_backlight_on(intel_dp);
2100 }
2101 
2102 /* Disable backlight in the panel power control. */
2103 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2104 {
2105 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2106 	struct drm_i915_private *dev_priv = dev->dev_private;
2107 	u32 pp;
2108 	u32 pp_ctrl_reg;
2109 
2110 	if (!is_edp(intel_dp))
2111 		return;
2112 
2113 	pps_lock(intel_dp);
2114 
2115 	pp = ironlake_get_pp_control(intel_dp);
2116 	pp &= ~EDP_BLC_ENABLE;
2117 
2118 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2119 
2120 	I915_WRITE(pp_ctrl_reg, pp);
2121 	POSTING_READ(pp_ctrl_reg);
2122 
2123 	pps_unlock(intel_dp);
2124 
2125 	intel_dp->last_backlight_off = jiffies;
2126 	edp_wait_backlight_off(intel_dp);
2127 }
2128 
2129 /* Disable backlight PP control and backlight PWM. */
2130 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2131 {
2132 	if (!is_edp(intel_dp))
2133 		return;
2134 
2135 	DRM_DEBUG_KMS("\n");
2136 
2137 	_intel_edp_backlight_off(intel_dp);
2138 	intel_panel_disable_backlight(intel_dp->attached_connector);
2139 }
2140 
2141 /*
2142  * Hook for controlling the panel power control backlight through the bl_power
2143  * sysfs attribute. Take care to handle multiple calls.
2144  */
2145 static void intel_edp_backlight_power(struct intel_connector *connector,
2146 				      bool enable)
2147 {
2148 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2149 	bool is_enabled;
2150 
2151 	pps_lock(intel_dp);
2152 	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2153 	pps_unlock(intel_dp);
2154 
2155 	if (is_enabled == enable)
2156 		return;
2157 
2158 	DRM_DEBUG_KMS("panel power control backlight %s\n",
2159 		      enable ? "enable" : "disable");
2160 
2161 	if (enable)
2162 		_intel_edp_backlight_on(intel_dp);
2163 	else
2164 		_intel_edp_backlight_off(intel_dp);
2165 }
2166 
2167 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2168 {
2169 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2170 	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2171 	struct drm_device *dev = crtc->dev;
2172 	struct drm_i915_private *dev_priv = dev->dev_private;
2173 	u32 dpa_ctl;
2174 
2175 	assert_pipe_disabled(dev_priv,
2176 			     to_intel_crtc(crtc)->pipe);
2177 
2178 	DRM_DEBUG_KMS("\n");
2179 	dpa_ctl = I915_READ(DP_A);
2180 	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2181 	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2182 
2183 	/* We don't adjust intel_dp->DP while tearing down the link, to
2184 	 * facilitate link retraining (e.g. after hotplug). Hence clear all
2185 	 * enable bits here to ensure that we don't enable too much. */
2186 	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2187 	intel_dp->DP |= DP_PLL_ENABLE;
2188 	I915_WRITE(DP_A, intel_dp->DP);
2189 	POSTING_READ(DP_A);
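	/* presumably the PLL lock time; ironlake_edp_pll_off() mirrors it */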
2190 	udelay(200);
2191 }
2192 
2193 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2194 {
2195 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2196 	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2197 	struct drm_device *dev = crtc->dev;
2198 	struct drm_i915_private *dev_priv = dev->dev_private;
2199 	u32 dpa_ctl;
2200 
2201 	assert_pipe_disabled(dev_priv,
2202 			     to_intel_crtc(crtc)->pipe);
2203 
2204 	dpa_ctl = I915_READ(DP_A);
2205 	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2206 	     "dp pll off, should be on\n");
2207 	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2208 
2209 	/* We can't rely on the value tracked for the DP register in
2210 	 * intel_dp->DP because link_down must not change that (otherwise link
2211 	 * re-training will fail). */
2212 	dpa_ctl &= ~DP_PLL_ENABLE;
2213 	I915_WRITE(DP_A, dpa_ctl);
2214 	POSTING_READ(DP_A);
2215 	udelay(200);
2216 }
2217 
2218 /* If the sink supports it, try to set the power state appropriately */
2219 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2220 {
2221 	int ret, i;
2222 
2223 	/* Should have a valid DPCD by this point */
2224 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2225 		return;
2226 
2227 	if (mode != DRM_MODE_DPMS_ON) {
2228 		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2229 					 DP_SET_POWER_D3);
2230 	} else {
2231 		/*
2232 		 * When turning on, we need to retry a few times, 1 ms apart,
2233 		 * to give the sink time to wake up.
2234 		 */
2235 		for (i = 0; i < 3; i++) {
2236 			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2237 						 DP_SET_POWER_D0);
2238 			if (ret == 1)
2239 				break;
2240 			msleep(1);
2241 		}
2242 	}
2243 
2244 	if (ret != 1)
2245 		DRM_DEBUG_KMS("failed to %s sink power state\n",
2246 			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2247 }
2248 
2249 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2250 				  enum i915_pipe *pipe)
2251 {
2252 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2253 	enum port port = dp_to_dig_port(intel_dp)->port;
2254 	struct drm_device *dev = encoder->base.dev;
2255 	struct drm_i915_private *dev_priv = dev->dev_private;
2256 	enum intel_display_power_domain power_domain;
2257 	u32 tmp;
2258 
2259 	power_domain = intel_display_port_power_domain(encoder);
2260 	if (!intel_display_power_is_enabled(dev_priv, power_domain))
2261 		return false;
2262 
2263 	tmp = I915_READ(intel_dp->output_reg);
2264 
2265 	if (!(tmp & DP_PORT_EN))
2266 		return false;
2267 
2268 	if (IS_GEN7(dev) && port == PORT_A) {
2269 		*pipe = PORT_TO_PIPE_CPT(tmp);
2270 	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2271 		enum i915_pipe p;
2272 
2273 		for_each_pipe(dev_priv, p) {
2274 			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2275 			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2276 				*pipe = p;
2277 				return true;
2278 			}
2279 		}
2280 
2281 		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2282 			      intel_dp->output_reg);
2283 	} else if (IS_CHERRYVIEW(dev)) {
2284 		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2285 	} else {
2286 		*pipe = PORT_TO_PIPE(tmp);
2287 	}
2288 
2289 	return true;
2290 }
2291 
2292 static void intel_dp_get_config(struct intel_encoder *encoder,
2293 				struct intel_crtc_state *pipe_config)
2294 {
2295 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2296 	u32 tmp, flags = 0;
2297 	struct drm_device *dev = encoder->base.dev;
2298 	struct drm_i915_private *dev_priv = dev->dev_private;
2299 	enum port port = dp_to_dig_port(intel_dp)->port;
2300 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2301 	int dotclock;
2302 
2303 	tmp = I915_READ(intel_dp->output_reg);
2304 
2305 	pipe_config->has_audio = (tmp & DP_AUDIO_OUTPUT_ENABLE) && port != PORT_A;
2306 
2307 	if (HAS_PCH_CPT(dev) && port != PORT_A) {
2308 		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2309 
2310 		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2311 			flags |= DRM_MODE_FLAG_PHSYNC;
2312 		else
2313 			flags |= DRM_MODE_FLAG_NHSYNC;
2314 
2315 		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2316 			flags |= DRM_MODE_FLAG_PVSYNC;
2317 		else
2318 			flags |= DRM_MODE_FLAG_NVSYNC;
2319 	} else {
2320 		if (tmp & DP_SYNC_HS_HIGH)
2321 			flags |= DRM_MODE_FLAG_PHSYNC;
2322 		else
2323 			flags |= DRM_MODE_FLAG_NHSYNC;
2324 
2325 		if (tmp & DP_SYNC_VS_HIGH)
2326 			flags |= DRM_MODE_FLAG_PVSYNC;
2327 		else
2328 			flags |= DRM_MODE_FLAG_NVSYNC;
2329 	}
2330 
2331 	pipe_config->base.adjusted_mode.flags |= flags;
2332 
2333 	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2334 	    tmp & DP_COLOR_RANGE_16_235)
2335 		pipe_config->limited_color_range = true;
2336 
2337 	pipe_config->has_dp_encoder = true;
2338 
2339 	pipe_config->lane_count =
2340 		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2341 
2342 	intel_dp_get_m_n(crtc, pipe_config);
2343 
2344 	if (port == PORT_A) {
2345 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2346 			pipe_config->port_clock = 162000;
2347 		else
2348 			pipe_config->port_clock = 270000;
2349 	}
2350 
2351 	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2352 					    &pipe_config->dp_m_n);
2353 
2354 	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2355 		ironlake_check_encoder_dotclock(pipe_config, dotclock);
2356 
2357 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2358 
2359 	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2360 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2361 		/*
2362 		 * This is a big fat ugly hack.
2363 		 *
2364 		 * Some machines in UEFI boot mode provide us a VBT that has 18
2365 		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2366 		 * unknown we fail to light up. Yet the same BIOS boots up with
2367 		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2368 		 * max, not what it tells us to use.
2369 		 *
2370 		 * Note: This will still be broken if the eDP panel is not lit
2371 		 * up by the BIOS, and thus we can't get the mode at module
2372 		 * load.
2373 		 */
2374 		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2375 			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2376 		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2377 	}
2378 }
2379 
2380 static void intel_disable_dp(struct intel_encoder *encoder)
2381 {
2382 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2383 	struct drm_device *dev = encoder->base.dev;
2384 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2385 
2386 	if (crtc->config->has_audio)
2387 		intel_audio_codec_disable(encoder);
2388 
2389 	if (HAS_PSR(dev) && !HAS_DDI(dev))
2390 		intel_psr_disable(intel_dp);
2391 
2392 	/* Make sure the panel is off before trying to change the mode. But also
2393 	 * ensure that we have vdd while we switch off the panel. */
2394 	intel_edp_panel_vdd_on(intel_dp);
2395 	intel_edp_backlight_off(intel_dp);
2396 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2397 	intel_edp_panel_off(intel_dp);
2398 
2399 	/* disable the port before the pipe on g4x */
2400 	if (INTEL_INFO(dev)->gen < 5)
2401 		intel_dp_link_down(intel_dp);
2402 }
2403 
2404 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2405 {
2406 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2407 	enum port port = dp_to_dig_port(intel_dp)->port;
2408 
2409 	intel_dp_link_down(intel_dp);
2410 	if (port == PORT_A)
2411 		ironlake_edp_pll_off(intel_dp);
2412 }
2413 
2414 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2415 {
2416 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2417 
2418 	intel_dp_link_down(intel_dp);
2419 }
2420 
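/*
 * Assert (reset=true) or deassert the CHV data lane soft reset. Mind the
 * polarity: the DPIO_PCS_TX_LANE*_RESET and clock soft reset bits are
 * evidently active-low here, so asserting reset means clearing them.
 */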
2421 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2422 				     bool reset)
2423 {
2424 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2425 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2426 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2427 	enum i915_pipe pipe = crtc->pipe;
2428 	uint32_t val;
2429 
2430 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2431 	if (reset)
2432 		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2433 	else
2434 		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2435 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2436 
2437 	if (crtc->config->lane_count > 2) {
2438 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2439 		if (reset)
2440 			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2441 		else
2442 			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2443 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2444 	}
2445 
2446 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2447 	val |= CHV_PCS_REQ_SOFTRESET_EN;
2448 	if (reset)
2449 		val &= ~DPIO_PCS_CLK_SOFT_RESET;
2450 	else
2451 		val |= DPIO_PCS_CLK_SOFT_RESET;
2452 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2453 
2454 	if (crtc->config->lane_count > 2) {
2455 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2456 		val |= CHV_PCS_REQ_SOFTRESET_EN;
2457 		if (reset)
2458 			val &= ~DPIO_PCS_CLK_SOFT_RESET;
2459 		else
2460 			val |= DPIO_PCS_CLK_SOFT_RESET;
2461 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2462 	}
2463 }
2464 
2465 static void chv_post_disable_dp(struct intel_encoder *encoder)
2466 {
2467 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2468 	struct drm_device *dev = encoder->base.dev;
2469 	struct drm_i915_private *dev_priv = dev->dev_private;
2470 
2471 	intel_dp_link_down(intel_dp);
2472 
2473 	mutex_lock(&dev_priv->sb_lock);
2474 
2475 	/* Assert data lane reset */
2476 	chv_data_lane_soft_reset(encoder, true);
2477 
2478 	mutex_unlock(&dev_priv->sb_lock);
2479 }
2480 
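/*
 * Route the requested training pattern to the right register family:
 * DP_TP_CTL on DDI platforms (written immediately), the CPT-style bits
 * for gen7 port A and PCH ports, or the legacy bits otherwise. In the
 * non-DDI cases only the caller's DP value is updated; the caller is
 * expected to write it to the port register afterwards.
 */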
2481 static void
2482 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2483 			 uint32_t *DP,
2484 			 uint8_t dp_train_pat)
2485 {
2486 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2487 	struct drm_device *dev = intel_dig_port->base.base.dev;
2488 	struct drm_i915_private *dev_priv = dev->dev_private;
2489 	enum port port = intel_dig_port->port;
2490 
2491 	if (HAS_DDI(dev)) {
2492 		uint32_t temp = I915_READ(DP_TP_CTL(port));
2493 
2494 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2495 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2496 		else
2497 			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2498 
2499 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2500 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2501 		case DP_TRAINING_PATTERN_DISABLE:
2502 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2504 			break;
2505 		case DP_TRAINING_PATTERN_1:
2506 			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2507 			break;
2508 		case DP_TRAINING_PATTERN_2:
2509 			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2510 			break;
2511 		case DP_TRAINING_PATTERN_3:
2512 			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2513 			break;
2514 		}
2515 		I915_WRITE(DP_TP_CTL(port), temp);
2516 
2517 	} else if ((IS_GEN7(dev) && port == PORT_A) ||
2518 		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
2519 		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2520 
2521 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2522 		case DP_TRAINING_PATTERN_DISABLE:
2523 			*DP |= DP_LINK_TRAIN_OFF_CPT;
2524 			break;
2525 		case DP_TRAINING_PATTERN_1:
2526 			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2527 			break;
2528 		case DP_TRAINING_PATTERN_2:
2529 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2530 			break;
2531 		case DP_TRAINING_PATTERN_3:
2532 			DRM_ERROR("DP training pattern 3 not supported\n");
2533 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2534 			break;
2535 		}
2536 
2537 	} else {
2538 		if (IS_CHERRYVIEW(dev))
2539 			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
2540 		else
2541 			*DP &= ~DP_LINK_TRAIN_MASK;
2542 
2543 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2544 		case DP_TRAINING_PATTERN_DISABLE:
2545 			*DP |= DP_LINK_TRAIN_OFF;
2546 			break;
2547 		case DP_TRAINING_PATTERN_1:
2548 			*DP |= DP_LINK_TRAIN_PAT_1;
2549 			break;
2550 		case DP_TRAINING_PATTERN_2:
2551 			*DP |= DP_LINK_TRAIN_PAT_2;
2552 			break;
2553 		case DP_TRAINING_PATTERN_3:
2554 			if (IS_CHERRYVIEW(dev)) {
2555 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
2556 			} else {
2557 				DRM_ERROR("DP training pattern 3 not supported\n");
2558 				*DP |= DP_LINK_TRAIN_PAT_2;
2559 			}
2560 			break;
2561 		}
2562 	}
2563 }
2564 
2565 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2566 {
2567 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2568 	struct drm_i915_private *dev_priv = dev->dev_private;
2569 
2570 	/* enable with pattern 1 (as per spec) */
2571 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2572 				 DP_TRAINING_PATTERN_1);
2573 
2574 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2575 	POSTING_READ(intel_dp->output_reg);
2576 
2577 	/*
2578 	 * Magic for VLV/CHV. We _must_ first set up the register
2579 	 * without actually enabling the port, and then do another
2580 	 * write to enable the port. Otherwise link training will
2581 	 * fail when the power sequencer is freshly used for this port.
2582 	 */
2583 	intel_dp->DP |= DP_PORT_EN;
2584 
2585 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2586 	POSTING_READ(intel_dp->output_reg);
2587 }
2588 
2589 static void intel_enable_dp(struct intel_encoder *encoder)
2590 {
2591 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2592 	struct drm_device *dev = encoder->base.dev;
2593 	struct drm_i915_private *dev_priv = dev->dev_private;
2594 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2595 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2596 
2597 	if (WARN_ON(dp_reg & DP_PORT_EN))
2598 		return;
2599 
2600 	pps_lock(intel_dp);
2601 
2602 	if (IS_VALLEYVIEW(dev))
2603 		vlv_init_panel_power_sequencer(intel_dp);
2604 
2605 	intel_dp_enable_port(intel_dp);
2606 
2607 	edp_panel_vdd_on(intel_dp);
2608 	edp_panel_on(intel_dp);
2609 	edp_panel_vdd_off(intel_dp, true);
2610 
2611 	pps_unlock(intel_dp);
2612 
2613 	if (IS_VALLEYVIEW(dev)) {
2614 		unsigned int lane_mask = 0x0;
2615 
2616 		if (IS_CHERRYVIEW(dev))
2617 			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2618 
2619 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2620 				    lane_mask);
2621 	}
2622 
2623 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2624 	intel_dp_start_link_train(intel_dp);
2625 	intel_dp_stop_link_train(intel_dp);
2626 
2627 	if (crtc->config->has_audio) {
2628 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2629 				 pipe_name(crtc->pipe));
2630 		intel_audio_codec_enable(encoder);
2631 	}
2632 }
2633 
2634 static void g4x_enable_dp(struct intel_encoder *encoder)
2635 {
2636 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2637 
2638 	intel_enable_dp(encoder);
2639 	intel_edp_backlight_on(intel_dp);
2640 }
2641 
2642 static void vlv_enable_dp(struct intel_encoder *encoder)
2643 {
2644 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2645 
2646 	intel_edp_backlight_on(intel_dp);
2647 	intel_psr_enable(intel_dp);
2648 }
2649 
2650 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2651 {
2652 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2653 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2654 
2655 	intel_dp_prepare(encoder);
2656 
2657 	/* Only ilk+ has port A */
2658 	if (dport->port == PORT_A) {
2659 		ironlake_set_pll_cpu_edp(intel_dp);
2660 		ironlake_edp_pll_on(intel_dp);
2661 	}
2662 }
2663 
2664 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2665 {
2666 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2667 	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2668 	enum i915_pipe pipe = intel_dp->pps_pipe;
2669 	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2670 
2671 	edp_panel_vdd_off_sync(intel_dp);
2672 
2673 	/*
2674 	 * VLV seems to get confused when multiple power sequencers
2675 	 * have the same port selected (even if only one has power/vdd
2676 	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2677 	 * CHV, on the other hand, doesn't seem to mind having the same port
2678 	 * selected in multiple power sequencers, but let's always clear the
2679 	 * port select when logically disconnecting a power sequencer
2680 	 * from a port.
2680 	 * from a port.
2681 	 */
2682 	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2683 		      pipe_name(pipe), port_name(intel_dig_port->port));
2684 	I915_WRITE(pp_on_reg, 0);
2685 	POSTING_READ(pp_on_reg);
2686 
2687 	intel_dp->pps_pipe = INVALID_PIPE;
2688 }
2689 
2690 static void vlv_steal_power_sequencer(struct drm_device *dev,
2691 				      enum i915_pipe pipe)
2692 {
2693 	struct drm_i915_private *dev_priv = dev->dev_private;
2694 	struct intel_encoder *encoder;
2695 
2696 	lockdep_assert_held(&dev_priv->pps_mutex);
2697 
2698 	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2699 		return;
2700 
2701 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2702 			    base.head) {
2703 		struct intel_dp *intel_dp;
2704 		enum port port;
2705 
2706 		if (encoder->type != INTEL_OUTPUT_EDP)
2707 			continue;
2708 
2709 		intel_dp = enc_to_intel_dp(&encoder->base);
2710 		port = dp_to_dig_port(intel_dp)->port;
2711 
2712 		if (intel_dp->pps_pipe != pipe)
2713 			continue;
2714 
2715 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2716 			      pipe_name(pipe), port_name(port));
2717 
2718 		WARN(encoder->base.crtc,
2719 		     "stealing pipe %c power sequencer from active eDP port %c\n",
2720 		     pipe_name(pipe), port_name(port));
2721 
2722 		/* make sure vdd is off before we steal it */
2723 		vlv_detach_power_sequencer(intel_dp);
2724 	}
2725 }
2726 
2727 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2728 {
2729 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2730 	struct intel_encoder *encoder = &intel_dig_port->base;
2731 	struct drm_device *dev = encoder->base.dev;
2732 	struct drm_i915_private *dev_priv = dev->dev_private;
2733 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2734 
2735 	lockdep_assert_held(&dev_priv->pps_mutex);
2736 
2737 	if (!is_edp(intel_dp))
2738 		return;
2739 
2740 	if (intel_dp->pps_pipe == crtc->pipe)
2741 		return;
2742 
2743 	/*
2744 	 * If another power sequencer was being used on this
2745 	 * port previously make sure to turn off vdd there while
2746 	 * we still have control of it.
2747 	 */
2748 	if (intel_dp->pps_pipe != INVALID_PIPE)
2749 		vlv_detach_power_sequencer(intel_dp);
2750 
2751 	/*
2752 	 * We may be stealing the power
2753 	 * sequencer from another port.
2754 	 */
2755 	vlv_steal_power_sequencer(dev, crtc->pipe);
2756 
2757 	/* now it's all ours */
2758 	intel_dp->pps_pipe = crtc->pipe;
2759 
2760 	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2761 		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2762 
2763 	/* init power sequencer on this pipe and port */
2764 	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2765 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2766 }
2767 
2768 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2769 {
2770 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2771 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2772 	struct drm_device *dev = encoder->base.dev;
2773 	struct drm_i915_private *dev_priv = dev->dev_private;
2774 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2775 	enum dpio_channel port = vlv_dport_to_channel(dport);
2776 	int pipe = intel_crtc->pipe;
2777 	u32 val;
2778 
2779 	mutex_lock(&dev_priv->sb_lock);
2780 
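	/*
	 * Set up clock channels for this port. Note the value read back
	 * below is immediately overwritten; the read is presumably retained
	 * only to match the original programming sequence.
	 */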
2781 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2782 	val = 0;
2783 	if (pipe)
2784 		val |= (1<<21);
2785 	else
2786 		val &= ~(1<<21);
2787 	val |= 0x001000c4;
2788 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2789 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2790 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2791 
2792 	mutex_unlock(&dev_priv->sb_lock);
2793 
2794 	intel_enable_dp(encoder);
2795 }
2796 
2797 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2798 {
2799 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2800 	struct drm_device *dev = encoder->base.dev;
2801 	struct drm_i915_private *dev_priv = dev->dev_private;
2802 	struct intel_crtc *intel_crtc =
2803 		to_intel_crtc(encoder->base.crtc);
2804 	enum dpio_channel port = vlv_dport_to_channel(dport);
2805 	int pipe = intel_crtc->pipe;
2806 
2807 	intel_dp_prepare(encoder);
2808 
2809 	/* Program Tx lane resets to default */
2810 	mutex_lock(&dev_priv->sb_lock);
2811 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2812 			 DPIO_PCS_TX_LANE2_RESET |
2813 			 DPIO_PCS_TX_LANE1_RESET);
2814 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2815 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2816 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2817 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2818 				 DPIO_PCS_CLK_SOFT_RESET);
2819 
2820 	/* Fix up inter-pair skew failure */
2821 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2822 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2823 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2824 	mutex_unlock(&dev_priv->sb_lock);
2825 }
2826 
2827 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2828 {
2829 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2830 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2831 	struct drm_device *dev = encoder->base.dev;
2832 	struct drm_i915_private *dev_priv = dev->dev_private;
2833 	struct intel_crtc *intel_crtc =
2834 		to_intel_crtc(encoder->base.crtc);
2835 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2836 	int pipe = intel_crtc->pipe;
2837 	int data, i, stagger;
2838 	u32 val;
2839 
2840 	mutex_lock(&dev_priv->sb_lock);
2841 
2842 	/* allow hardware to manage TX FIFO reset source */
2843 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2844 	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2845 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2846 
2847 	if (intel_crtc->config->lane_count > 2) {
2848 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2849 		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2850 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2851 	}
2852 
2853 	/* Program Tx lane latency optimal setting */
2854 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
2855 		/* Set the upar bit */
2856 		if (intel_crtc->config->lane_count == 1)
2857 			data = 0x0;
2858 		else
2859 			data = (i == 1) ? 0x0 : 0x1;
2860 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2861 				data << DPIO_UPAR_SHIFT);
2862 	}
2863 
2864 	/* Data lane stagger programming */
2865 	if (intel_crtc->config->port_clock > 270000)
2866 		stagger = 0x18;
2867 	else if (intel_crtc->config->port_clock > 135000)
2868 		stagger = 0xd;
2869 	else if (intel_crtc->config->port_clock > 67500)
2870 		stagger = 0x7;
2871 	else if (intel_crtc->config->port_clock > 33750)
2872 		stagger = 0x4;
2873 	else
2874 		stagger = 0x2;
2875 
2876 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2877 	val |= DPIO_TX2_STAGGER_MASK(0x1f);
2878 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2879 
2880 	if (intel_crtc->config->lane_count > 2) {
2881 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2882 		val |= DPIO_TX2_STAGGER_MASK(0x1f);
2883 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2884 	}
2885 
2886 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2887 		       DPIO_LANESTAGGER_STRAP(stagger) |
2888 		       DPIO_LANESTAGGER_STRAP_OVRD |
2889 		       DPIO_TX1_STAGGER_MASK(0x1f) |
2890 		       DPIO_TX1_STAGGER_MULT(6) |
2891 		       DPIO_TX2_STAGGER_MULT(0));
2892 
2893 	if (intel_crtc->config->lane_count > 2) {
2894 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2895 			       DPIO_LANESTAGGER_STRAP(stagger) |
2896 			       DPIO_LANESTAGGER_STRAP_OVRD |
2897 			       DPIO_TX1_STAGGER_MASK(0x1f) |
2898 			       DPIO_TX1_STAGGER_MULT(7) |
2899 			       DPIO_TX2_STAGGER_MULT(5));
2900 	}
2901 
2902 	/* Deassert data lane reset */
2903 	chv_data_lane_soft_reset(encoder, false);
2904 
2905 	mutex_unlock(&dev_priv->sb_lock);
2906 
2907 	intel_enable_dp(encoder);
2908 
2909 	/* Second common lane will stay alive on its own now */
2910 	if (dport->release_cl2_override) {
2911 		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2912 		dport->release_cl2_override = false;
2913 	}
2914 }
2915 
2916 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2917 {
2918 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2919 	struct drm_device *dev = encoder->base.dev;
2920 	struct drm_i915_private *dev_priv = dev->dev_private;
2921 	struct intel_crtc *intel_crtc =
2922 		to_intel_crtc(encoder->base.crtc);
2923 	enum dpio_channel ch = vlv_dport_to_channel(dport);
2924 	enum i915_pipe pipe = intel_crtc->pipe;
2925 	unsigned int lane_mask =
2926 		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2927 	u32 val;
2928 
2929 	intel_dp_prepare(encoder);
2930 
2931 	/*
2932 	 * Must trick the second common lane into life.
2933 	 * Otherwise we can't even access the PLL.
2934 	 */
2935 	if (ch == DPIO_CH0 && pipe == PIPE_B)
2936 		dport->release_cl2_override =
2937 			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2938 
2939 	chv_phy_powergate_lanes(encoder, true, lane_mask);
2940 
2941 	mutex_lock(&dev_priv->sb_lock);
2942 
2943 	/* Assert data lane reset */
2944 	chv_data_lane_soft_reset(encoder, true);
2945 
2946 	/* program left/right clock distribution */
2947 	if (pipe != PIPE_B) {
2948 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2949 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2950 		if (ch == DPIO_CH0)
2951 			val |= CHV_BUFLEFTENA1_FORCE;
2952 		if (ch == DPIO_CH1)
2953 			val |= CHV_BUFRIGHTENA1_FORCE;
2954 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2955 	} else {
2956 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2957 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2958 		if (ch == DPIO_CH0)
2959 			val |= CHV_BUFLEFTENA2_FORCE;
2960 		if (ch == DPIO_CH1)
2961 			val |= CHV_BUFRIGHTENA2_FORCE;
2962 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2963 	}
2964 
2965 	/* program clock channel usage */
2966 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2967 	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2968 	if (pipe != PIPE_B)
2969 		val &= ~CHV_PCS_USEDCLKCHANNEL;
2970 	else
2971 		val |= CHV_PCS_USEDCLKCHANNEL;
2972 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2973 
2974 	if (intel_crtc->config->lane_count > 2) {
2975 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2976 		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2977 		if (pipe != PIPE_B)
2978 			val &= ~CHV_PCS_USEDCLKCHANNEL;
2979 		else
2980 			val |= CHV_PCS_USEDCLKCHANNEL;
2981 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2982 	}
2983 
2984 	/*
2985 	 * This is a bit weird since generally CL
2986 	 * matches the pipe, but here we need to
2987 	 * pick the CL based on the port.
2988 	 */
2989 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2990 	if (pipe != PIPE_B)
2991 		val &= ~CHV_CMN_USEDCLKCHANNEL;
2992 	else
2993 		val |= CHV_CMN_USEDCLKCHANNEL;
2994 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2995 
2996 	mutex_unlock(&dev_priv->sb_lock);
2997 }
2998 
2999 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3000 {
3001 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3002 	enum i915_pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3003 	u32 val;
3004 
3005 	mutex_lock(&dev_priv->sb_lock);
3006 
3007 	/* disable left/right clock distribution */
3008 	if (pipe != PIPE_B) {
3009 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3010 		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3011 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3012 	} else {
3013 		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3014 		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3015 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3016 	}
3017 
3018 	mutex_unlock(&dev_priv->sb_lock);
3019 
3020 	/*
3021 	 * Leave the power down bit cleared for at least one
3022 	 * lane so that chv_phy_powergate_ch() will power
3023 	 * on something when the channel is otherwise unused.
3024 	 * When the port is off and the override is removed
3025 	 * the lanes power down anyway, so otherwise it doesn't
3026 	 * really matter what the state of power down bits is
3027 	 * after this.
3028 	 */
3029 	chv_phy_powergate_lanes(encoder, false, 0x0);
3030 }
3031 
3032 /*
3033  * Native read with retry for link status and receiver capability reads for
3034  * cases where the sink may still be asleep.
3035  *
3036  * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3037  * supposed to retry 3 times per the spec.
3038  */
3039 static ssize_t
3040 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3041 			void *buffer, size_t size)
3042 {
3043 	ssize_t ret;
3044 	int i;
3045 
3046 	/*
3047 	 * Sometimes we just get the same incorrect byte repeated
3048 	 * over the entire buffer. Doing just one throw-away read
3049 	 * initially seems to "solve" it.
3050 	 */
3051 	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3052 
3053 	for (i = 0; i < 3; i++) {
3054 		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3055 		if (ret == size)
3056 			return ret;
3057 		msleep(1);
3058 	}
3059 
3060 	return ret;
3061 }
3062 
3063 /*
3064  * Fetch AUX CH registers 0x202 - 0x207 which contain
3065  * link status information
3066  */
3067 static bool
3068 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3069 {
3070 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3071 				       DP_LANE0_1_STATUS,
3072 				       link_status,
3073 				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3074 }
3075 
3076 /* These are source-specific values. */
3077 static uint8_t
3078 intel_dp_voltage_max(struct intel_dp *intel_dp)
3079 {
3080 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3081 	struct drm_i915_private *dev_priv = dev->dev_private;
3082 	enum port port = dp_to_dig_port(intel_dp)->port;
3083 
3084 	if (IS_BROXTON(dev))
3085 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3086 	else if (INTEL_INFO(dev)->gen >= 9) {
3087 		if (dev_priv->edp_low_vswing && port == PORT_A)
3088 			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3089 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3090 	} else if (IS_VALLEYVIEW(dev))
3091 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3092 	else if (IS_GEN7(dev) && port == PORT_A)
3093 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3094 	else if (HAS_PCH_CPT(dev) && port != PORT_A)
3095 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3096 	else
3097 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3098 }
3099 
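/*
 * Maximum pre-emphasis for a given voltage swing. The tables largely
 * follow the DP-spec rule of thumb that swing level plus pre-emphasis
 * level stays bounded, so the cap steps down as the swing goes up.
 */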
3100 static uint8_t
3101 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3102 {
3103 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3104 	enum port port = dp_to_dig_port(intel_dp)->port;
3105 
3106 	if (INTEL_INFO(dev)->gen >= 9) {
3107 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3108 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3109 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3110 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3111 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3112 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3113 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3114 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3115 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3116 		default:
3117 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3118 		}
3119 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3120 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3121 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3122 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3123 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3124 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3125 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3126 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3127 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3128 		default:
3129 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3130 		}
3131 	} else if (IS_VALLEYVIEW(dev)) {
3132 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3133 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3134 			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3135 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3136 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3137 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3138 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3139 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3140 		default:
3141 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3142 		}
3143 	} else if (IS_GEN7(dev) && port == PORT_A) {
3144 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3145 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3146 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3147 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3148 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3149 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3150 		default:
3151 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3152 		}
3153 	} else {
3154 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3155 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3156 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3157 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3158 			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3159 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3160 			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3161 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3162 		default:
3163 			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3164 		}
3165 	}
3166 }
3167 
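/*
 * VLV sets swing/pre-emphasis through DPIO sideband writes instead of
 * bits in the port register, which is why this programs the PHY directly
 * and always returns 0 for the register-level signal bits.
 */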
3168 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3169 {
3170 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3171 	struct drm_i915_private *dev_priv = dev->dev_private;
3172 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3173 	struct intel_crtc *intel_crtc =
3174 		to_intel_crtc(dport->base.base.crtc);
3175 	unsigned long demph_reg_value, preemph_reg_value,
3176 		uniqtranscale_reg_value;
3177 	uint8_t train_set = intel_dp->train_set[0];
3178 	enum dpio_channel port = vlv_dport_to_channel(dport);
3179 	int pipe = intel_crtc->pipe;
3180 
3181 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3182 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3183 		preemph_reg_value = 0x0004000;
3184 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3185 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3186 			demph_reg_value = 0x2B405555;
3187 			uniqtranscale_reg_value = 0x552AB83A;
3188 			break;
3189 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3190 			demph_reg_value = 0x2B404040;
3191 			uniqtranscale_reg_value = 0x5548B83A;
3192 			break;
3193 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3194 			demph_reg_value = 0x2B245555;
3195 			uniqtranscale_reg_value = 0x5560B83A;
3196 			break;
3197 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3198 			demph_reg_value = 0x2B405555;
3199 			uniqtranscale_reg_value = 0x5598DA3A;
3200 			break;
3201 		default:
3202 			return 0;
3203 		}
3204 		break;
3205 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3206 		preemph_reg_value = 0x0002000;
3207 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3208 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3209 			demph_reg_value = 0x2B404040;
3210 			uniqtranscale_reg_value = 0x5552B83A;
3211 			break;
3212 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3213 			demph_reg_value = 0x2B404848;
3214 			uniqtranscale_reg_value = 0x5580B83A;
3215 			break;
3216 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3217 			demph_reg_value = 0x2B404040;
3218 			uniqtranscale_reg_value = 0x55ADDA3A;
3219 			break;
3220 		default:
3221 			return 0;
3222 		}
3223 		break;
3224 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3225 		preemph_reg_value = 0x0000000;
3226 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3227 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3228 			demph_reg_value = 0x2B305555;
3229 			uniqtranscale_reg_value = 0x5570B83A;
3230 			break;
3231 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3232 			demph_reg_value = 0x2B2B4040;
3233 			uniqtranscale_reg_value = 0x55ADDA3A;
3234 			break;
3235 		default:
3236 			return 0;
3237 		}
3238 		break;
3239 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3240 		preemph_reg_value = 0x0006000;
3241 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3242 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3243 			demph_reg_value = 0x1B405555;
3244 			uniqtranscale_reg_value = 0x55ADDA3A;
3245 			break;
3246 		default:
3247 			return 0;
3248 		}
3249 		break;
3250 	default:
3251 		return 0;
3252 	}
3253 
3254 	mutex_lock(&dev_priv->sb_lock);
3255 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3256 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3257 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3258 			 uniqtranscale_reg_value);
3259 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3260 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3261 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3262 	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3263 	mutex_unlock(&dev_priv->sb_lock);
3264 
3265 	return 0;
3266 }
3267 
3268 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3269 {
3270 	return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3271 		(train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3272 }
3273 
3274 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3275 {
3276 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3277 	struct drm_i915_private *dev_priv = dev->dev_private;
3278 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3279 	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3280 	u32 deemph_reg_value, margin_reg_value, val;
3281 	uint8_t train_set = intel_dp->train_set[0];
3282 	enum dpio_channel ch = vlv_dport_to_channel(dport);
3283 	enum i915_pipe pipe = intel_crtc->pipe;
3284 	int i;
3285 
3286 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3287 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3288 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3289 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3290 			deemph_reg_value = 128;
3291 			margin_reg_value = 52;
3292 			break;
3293 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3294 			deemph_reg_value = 128;
3295 			margin_reg_value = 77;
3296 			break;
3297 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3298 			deemph_reg_value = 128;
3299 			margin_reg_value = 102;
3300 			break;
3301 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3302 			deemph_reg_value = 128;
3303 			margin_reg_value = 154;
3304 			/* FIXME extra to set for 1200 */
3305 			break;
3306 		default:
3307 			return 0;
3308 		}
3309 		break;
3310 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3311 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3312 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3313 			deemph_reg_value = 85;
3314 			margin_reg_value = 78;
3315 			break;
3316 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3317 			deemph_reg_value = 85;
3318 			margin_reg_value = 116;
3319 			break;
3320 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3321 			deemph_reg_value = 85;
3322 			margin_reg_value = 154;
3323 			break;
3324 		default:
3325 			return 0;
3326 		}
3327 		break;
3328 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3329 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3330 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3331 			deemph_reg_value = 64;
3332 			margin_reg_value = 104;
3333 			break;
3334 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3335 			deemph_reg_value = 64;
3336 			margin_reg_value = 154;
3337 			break;
3338 		default:
3339 			return 0;
3340 		}
3341 		break;
3342 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3343 		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3344 		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3345 			deemph_reg_value = 43;
3346 			margin_reg_value = 154;
3347 			break;
3348 		default:
3349 			return 0;
3350 		}
3351 		break;
3352 	default:
3353 		return 0;
3354 	}
3355 
3356 	mutex_lock(&dev_priv->sb_lock);
3357 
3358 	/* Clear calc init */
3359 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3360 	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3361 	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3362 	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3363 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3364 
3365 	if (intel_crtc->config->lane_count > 2) {
3366 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3367 		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3368 		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3369 		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3370 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3371 	}
3372 
3373 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3374 	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3375 	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3376 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3377 
3378 	if (intel_crtc->config->lane_count > 2) {
3379 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3380 		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3381 		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3382 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3383 	}
3384 
3385 	/* Program swing deemph */
3386 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3387 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3388 		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3389 		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3390 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3391 	}
3392 
3393 	/* Program swing margin */
3394 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3395 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3396 
3397 		val &= ~DPIO_SWING_MARGIN000_MASK;
3398 		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3399 
3400 		/*
3401 		 * Supposedly this value shouldn't matter when unique transition
3402 		 * scale is disabled, but in fact it does matter. Let's just
3403 		 * always program the same value and hope it's OK.
3404 		 */
3405 		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3406 		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3407 
3408 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3409 	}
3410 
3411 	/*
3412 	 * The document said it needs to set bit 27 for ch0 and bit 26
3413 	 * for ch1. Might be a typo in the doc.
3414 	 * For now, for this unique transition scale selection, set bit
3415 	 * 27 for ch0 and ch1.
3416 	 */
3417 	for (i = 0; i < intel_crtc->config->lane_count; i++) {
3418 		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3419 		if (chv_need_uniq_trans_scale(train_set))
3420 			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3421 		else
3422 			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3423 		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3424 	}
3425 
3426 	/* Start swing calculation */
3427 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3428 	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3429 	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3430 
3431 	if (intel_crtc->config->lane_count > 2) {
3432 		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3433 		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3434 		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3435 	}
3436 
3437 	mutex_unlock(&dev_priv->sb_lock);
3438 
3439 	return 0;
3440 }
3441 
3442 static void
3443 intel_get_adjust_train(struct intel_dp *intel_dp,
3444 		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
3445 {
3446 	uint8_t v = 0;
3447 	uint8_t p = 0;
3448 	int lane;
3449 	uint8_t voltage_max;
3450 	uint8_t preemph_max;
3451 
3452 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
3453 		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3454 		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3455 
3456 		if (this_v > v)
3457 			v = this_v;
3458 		if (this_p > p)
3459 			p = this_p;
3460 	}
3461 
3462 	voltage_max = intel_dp_voltage_max(intel_dp);
3463 	if (v >= voltage_max)
3464 		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3465 
3466 	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3467 	if (p >= preemph_max)
3468 		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3469 
3470 	for (lane = 0; lane < 4; lane++)
3471 		intel_dp->train_set[lane] = v | p;
3472 }
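
/*
 * Worked example for intel_get_adjust_train() above (hypothetical sink
 * requests): if lane 0 asks for swing level 1 and lane 1 asks for swing
 * level 2 with pre-emphasis level 1, the loop picks the link-wide maxima
 * v = level 2 and p = level 1. If level 2 is also the source's
 * voltage_max, v becomes (level 2 | DP_TRAIN_MAX_SWING_REACHED), and all
 * four train_set entries are set to v | p.
 */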
3473 
3474 static uint32_t
3475 gen4_signal_levels(uint8_t train_set)
3476 {
3477 	uint32_t	signal_levels = 0;
3478 
3479 	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3480 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3481 	default:
3482 		signal_levels |= DP_VOLTAGE_0_4;
3483 		break;
3484 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3485 		signal_levels |= DP_VOLTAGE_0_6;
3486 		break;
3487 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3488 		signal_levels |= DP_VOLTAGE_0_8;
3489 		break;
3490 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3491 		signal_levels |= DP_VOLTAGE_1_2;
3492 		break;
3493 	}
3494 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3495 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3496 	default:
3497 		signal_levels |= DP_PRE_EMPHASIS_0;
3498 		break;
3499 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3500 		signal_levels |= DP_PRE_EMPHASIS_3_5;
3501 		break;
3502 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3503 		signal_levels |= DP_PRE_EMPHASIS_6;
3504 		break;
3505 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3506 		signal_levels |= DP_PRE_EMPHASIS_9_5;
3507 		break;
3508 	}
3509 	return signal_levels;
3510 }
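
/*
 * For example, a train_set of (DP_TRAIN_VOLTAGE_SWING_LEVEL_1 |
 * DP_TRAIN_PRE_EMPH_LEVEL_1) maps to (DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_3_5),
 * i.e. 0.6V voltage swing with 3.5dB of pre-emphasis.
 */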
3511 
3512 /* Gen6's DP voltage swing and pre-emphasis control */
3513 static uint32_t
3514 gen6_edp_signal_levels(uint8_t train_set)
3515 {
3516 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3517 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3518 	switch (signal_levels) {
3519 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3520 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3521 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3522 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3523 		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3524 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3525 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3526 		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3527 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3528 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3529 		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3530 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3531 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3532 		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3533 	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
			      "0x%x\n", signal_levels);
3536 		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3537 	}
3538 }
3539 
3540 /* Gen7's DP voltage swing and pre-emphasis control */
3541 static uint32_t
3542 gen7_edp_signal_levels(uint8_t train_set)
3543 {
3544 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3545 					 DP_TRAIN_PRE_EMPHASIS_MASK);
3546 	switch (signal_levels) {
3547 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3548 		return EDP_LINK_TRAIN_400MV_0DB_IVB;
3549 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3550 		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3551 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3552 		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3553 
3554 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3555 		return EDP_LINK_TRAIN_600MV_0DB_IVB;
3556 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3557 		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3558 
3559 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3560 		return EDP_LINK_TRAIN_800MV_0DB_IVB;
3561 	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3562 		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3563 
3564 	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
			      "0x%x\n", signal_levels);
3567 		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3568 	}
3569 }
3570 
3571 /* Properly updates "DP" with the correct signal levels. */
3572 static void
3573 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3574 {
3575 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3576 	enum port port = intel_dig_port->port;
3577 	struct drm_device *dev = intel_dig_port->base.base.dev;
3578 	uint32_t signal_levels, mask = 0;
3579 	uint8_t train_set = intel_dp->train_set[0];
3580 
3581 	if (HAS_DDI(dev)) {
3582 		signal_levels = ddi_signal_levels(intel_dp);
3583 
3584 		if (IS_BROXTON(dev))
3585 			signal_levels = 0;
3586 		else
3587 			mask = DDI_BUF_EMP_MASK;
3588 	} else if (IS_CHERRYVIEW(dev)) {
3589 		signal_levels = chv_signal_levels(intel_dp);
3590 	} else if (IS_VALLEYVIEW(dev)) {
3591 		signal_levels = vlv_signal_levels(intel_dp);
3592 	} else if (IS_GEN7(dev) && port == PORT_A) {
3593 		signal_levels = gen7_edp_signal_levels(train_set);
3594 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3595 	} else if (IS_GEN6(dev) && port == PORT_A) {
3596 		signal_levels = gen6_edp_signal_levels(train_set);
3597 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3598 	} else {
3599 		signal_levels = gen4_signal_levels(train_set);
3600 		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3601 	}
3602 
3603 	if (mask)
3604 		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3605 
3606 	DRM_DEBUG_KMS("Using vswing level %d\n",
3607 		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3608 	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3609 		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3610 			DP_TRAIN_PRE_EMPHASIS_SHIFT);
3611 
3612 	*DP = (*DP & ~mask) | signal_levels;
3613 }
3614 
3615 static bool
3616 intel_dp_set_link_train(struct intel_dp *intel_dp,
3617 			uint32_t *DP,
3618 			uint8_t dp_train_pat)
3619 {
3620 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3621 	struct drm_i915_private *dev_priv =
3622 		to_i915(intel_dig_port->base.base.dev);
3623 	uint8_t buf[sizeof(intel_dp->train_set) + 1];
3624 	int ret, len;
3625 
3626 	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3627 
3628 	I915_WRITE(intel_dp->output_reg, *DP);
3629 	POSTING_READ(intel_dp->output_reg);
3630 
3631 	buf[0] = dp_train_pat;
3632 	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3633 	    DP_TRAINING_PATTERN_DISABLE) {
3634 		/* don't write DP_TRAINING_LANEx_SET on disable */
3635 		len = 1;
3636 	} else {
		/* the DP_TRAINING_LANEx_SET registers follow DP_TRAINING_PATTERN_SET */
3638 		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3639 		len = intel_dp->lane_count + 1;
3640 	}
3641 
3642 	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3643 				buf, len);
3644 
3645 	return ret == len;
3646 }
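
/*
 * Note that the DPCD write above relies on the training registers being
 * contiguous: buf[0] lands in DP_TRAINING_PATTERN_SET and
 * buf[1..lane_count] land in DP_TRAINING_LANE0_SET onwards. For example,
 * starting TPS1 on a 2-lane link is a single 3-byte AUX write of
 * { DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE,
 *   train_set[0], train_set[1] }.
 */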
3647 
3648 static bool
3649 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3650 			uint8_t dp_train_pat)
3651 {
3652 	if (!intel_dp->train_set_valid)
3653 		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3654 	intel_dp_set_signal_levels(intel_dp, DP);
3655 	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3656 }
3657 
3658 static bool
3659 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3660 			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
3661 {
3662 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3663 	struct drm_i915_private *dev_priv =
3664 		to_i915(intel_dig_port->base.base.dev);
3665 	int ret;
3666 
3667 	intel_get_adjust_train(intel_dp, link_status);
3668 	intel_dp_set_signal_levels(intel_dp, DP);
3669 
3670 	I915_WRITE(intel_dp->output_reg, *DP);
3671 	POSTING_READ(intel_dp->output_reg);
3672 
3673 	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3674 				intel_dp->train_set, intel_dp->lane_count);
3675 
3676 	return ret == intel_dp->lane_count;
3677 }
3678 
3679 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3680 {
3681 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3682 	struct drm_device *dev = intel_dig_port->base.base.dev;
3683 	struct drm_i915_private *dev_priv = dev->dev_private;
3684 	enum port port = intel_dig_port->port;
3685 	uint32_t val;
3686 
3687 	if (!HAS_DDI(dev))
3688 		return;
3689 
3690 	val = I915_READ(DP_TP_CTL(port));
3691 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3692 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3693 	I915_WRITE(DP_TP_CTL(port), val);
3694 
	/*
	 * On PORT_A we can only have eDP in SST mode. There, the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is a requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
3702 	if (port == PORT_A)
3703 		return;
3704 
3705 	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3706 		     1))
3707 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3708 }
3709 
3710 /* Enable corresponding port and start training pattern 1 */
3711 static void
3712 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
3713 {
3714 	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3715 	struct drm_device *dev = encoder->dev;
3716 	int i;
3717 	uint8_t voltage;
3718 	int voltage_tries, loop_tries;
3719 	uint32_t DP = intel_dp->DP;
3720 	uint8_t link_config[2];
3721 	uint8_t link_bw, rate_select;
3722 
3723 	if (HAS_DDI(dev))
3724 		intel_ddi_prepare_link_retrain(encoder);
3725 
3726 	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3727 			      &link_bw, &rate_select);
3728 
3729 	/* Write the link configuration data */
3730 	link_config[0] = link_bw;
3731 	link_config[1] = intel_dp->lane_count;
3732 	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3733 		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3734 	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3735 	if (intel_dp->num_sink_rates)
3736 		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3737 				  &rate_select, 1);
3738 
3739 	link_config[0] = 0;
3740 	link_config[1] = DP_SET_ANSI_8B10B;
3741 	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3742 
3743 	DP |= DP_PORT_EN;
3744 
3745 	/* clock recovery */
3746 	if (!intel_dp_reset_link_train(intel_dp, &DP,
3747 				       DP_TRAINING_PATTERN_1 |
3748 				       DP_LINK_SCRAMBLING_DISABLE)) {
3749 		DRM_ERROR("failed to enable link training\n");
3750 		return;
3751 	}
3752 
3753 	voltage = 0xff;
3754 	voltage_tries = 0;
3755 	loop_tries = 0;
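
	/*
	 * The loop below gives up once the same voltage swing has been
	 * requested 5 times in a row (voltage_tries), or on the 5th full
	 * restart after every lane has reported max swing (loop_tries).
	 */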
3756 	for (;;) {
3757 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3758 
3759 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3760 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3761 			DRM_ERROR("failed to get link status\n");
3762 			break;
3763 		}
3764 
3765 		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3766 			DRM_DEBUG_KMS("clock recovery OK\n");
3767 			break;
3768 		}
3769 
3770 		/*
3771 		 * if we used previously trained voltage and pre-emphasis values
3772 		 * and we don't get clock recovery, reset link training values
3773 		 */
3774 		if (intel_dp->train_set_valid) {
			DRM_DEBUG_KMS("clock recovery not ok, reset\n");
3776 			/* clear the flag as we are not reusing train set */
3777 			intel_dp->train_set_valid = false;
3778 			if (!intel_dp_reset_link_train(intel_dp, &DP,
3779 						       DP_TRAINING_PATTERN_1 |
3780 						       DP_LINK_SCRAMBLING_DISABLE)) {
3781 				DRM_ERROR("failed to enable link training\n");
3782 				return;
3783 			}
3784 			continue;
3785 		}
3786 
3787 		/* Check to see if we've tried the max voltage */
3788 		for (i = 0; i < intel_dp->lane_count; i++)
3789 			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3790 				break;
3791 		if (i == intel_dp->lane_count) {
3792 			++loop_tries;
3793 			if (loop_tries == 5) {
3794 				DRM_ERROR("too many full retries, give up\n");
3795 				break;
3796 			}
3797 			intel_dp_reset_link_train(intel_dp, &DP,
3798 						  DP_TRAINING_PATTERN_1 |
3799 						  DP_LINK_SCRAMBLING_DISABLE);
3800 			voltage_tries = 0;
3801 			continue;
3802 		}
3803 
3804 		/* Check to see if we've tried the same voltage 5 times */
3805 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3806 			++voltage_tries;
3807 			if (voltage_tries == 5) {
3808 				DRM_ERROR("too many voltage retries, give up\n");
3809 				break;
3810 			}
3811 		} else
3812 			voltage_tries = 0;
3813 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3814 
3815 		/* Update training set as requested by target */
3816 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3817 			DRM_ERROR("failed to update link training\n");
3818 			break;
3819 		}
3820 	}
3821 
3822 	intel_dp->DP = DP;
3823 }
3824 
3825 static void
3826 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
3827 {
3828 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3829 	struct drm_device *dev = dig_port->base.base.dev;
3830 	bool channel_eq = false;
3831 	int tries, cr_tries;
3832 	uint32_t DP = intel_dp->DP;
3833 	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3834 
	/*
	 * Training Pattern 3 for HBR2 or DP 1.2 devices that support it.
	 *
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
	 * also mandatory for downstream devices that support HBR2.
	 *
	 * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
	 * supported but still not enabled.
	 */
3844 	if (intel_dp_source_supports_hbr2(dev) &&
3845 	    drm_dp_tps3_supported(intel_dp->dpcd))
3846 		training_pattern = DP_TRAINING_PATTERN_3;
3847 	else if (intel_dp->link_rate == 540000)
3848 		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
3849 
3850 	/* channel equalization */
3851 	if (!intel_dp_set_link_train(intel_dp, &DP,
3852 				     training_pattern |
3853 				     DP_LINK_SCRAMBLING_DISABLE)) {
3854 		DRM_ERROR("failed to start channel equalization\n");
3855 		return;
3856 	}
3857 
3858 	tries = 0;
3859 	cr_tries = 0;
3860 	channel_eq = false;
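
	/*
	 * In the loop below, 'tries' counts equalization attempts at the
	 * current settings, while 'cr_tries' counts clock recovery reruns:
	 * once 'tries' exceeds 5 we fall back to clock recovery, and once
	 * 'cr_tries' exceeds 5 we give up entirely.
	 */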
3861 	for (;;) {
3862 		uint8_t link_status[DP_LINK_STATUS_SIZE];
3863 
3864 		if (cr_tries > 5) {
3865 			DRM_ERROR("failed to train DP, aborting\n");
3866 			break;
3867 		}
3868 
3869 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3870 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3871 			DRM_ERROR("failed to get link status\n");
3872 			break;
3873 		}
3874 
3875 		/* Make sure clock is still ok */
3876 		if (!drm_dp_clock_recovery_ok(link_status,
3877 					      intel_dp->lane_count)) {
3878 			intel_dp->train_set_valid = false;
3879 			intel_dp_link_training_clock_recovery(intel_dp);
3880 			intel_dp_set_link_train(intel_dp, &DP,
3881 						training_pattern |
3882 						DP_LINK_SCRAMBLING_DISABLE);
3883 			cr_tries++;
3884 			continue;
3885 		}
3886 
3887 		if (drm_dp_channel_eq_ok(link_status,
3888 					 intel_dp->lane_count)) {
3889 			channel_eq = true;
3890 			break;
3891 		}
3892 
3893 		/* Try 5 times, then try clock recovery if that fails */
3894 		if (tries > 5) {
3895 			intel_dp->train_set_valid = false;
3896 			intel_dp_link_training_clock_recovery(intel_dp);
3897 			intel_dp_set_link_train(intel_dp, &DP,
3898 						training_pattern |
3899 						DP_LINK_SCRAMBLING_DISABLE);
3900 			tries = 0;
3901 			cr_tries++;
3902 			continue;
3903 		}
3904 
3905 		/* Update training set as requested by target */
3906 		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3907 			DRM_ERROR("failed to update link training\n");
3908 			break;
3909 		}
3910 		++tries;
3911 	}
3912 
3913 	intel_dp_set_idle_link_train(intel_dp);
3914 
3915 	intel_dp->DP = DP;
3916 
3917 	if (channel_eq) {
3918 		intel_dp->train_set_valid = true;
3919 		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3920 	}
3921 }
3922 
3923 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3924 {
3925 	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3926 				DP_TRAINING_PATTERN_DISABLE);
3927 }
3928 
3929 void
3930 intel_dp_start_link_train(struct intel_dp *intel_dp)
3931 {
3932 	intel_dp_link_training_clock_recovery(intel_dp);
3933 	intel_dp_link_training_channel_equalization(intel_dp);
3934 }
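
/*
 * Link training is thus the two phases above run back to back; once the
 * link is up, intel_dp_stop_link_train() switches the port out of the
 * training pattern by writing DP_TRAINING_PATTERN_DISABLE.
 */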
3935 
3936 static void
3937 intel_dp_link_down(struct intel_dp *intel_dp)
3938 {
3939 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3940 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3941 	enum port port = intel_dig_port->port;
3942 	struct drm_device *dev = intel_dig_port->base.base.dev;
3943 	struct drm_i915_private *dev_priv = dev->dev_private;
3944 	uint32_t DP = intel_dp->DP;
3945 
3946 	if (WARN_ON(HAS_DDI(dev)))
3947 		return;
3948 
3949 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3950 		return;
3951 
3952 	DRM_DEBUG_KMS("\n");
3953 
3954 	if ((IS_GEN7(dev) && port == PORT_A) ||
3955 	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
3956 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3957 		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3958 	} else {
3959 		if (IS_CHERRYVIEW(dev))
3960 			DP &= ~DP_LINK_TRAIN_MASK_CHV;
3961 		else
3962 			DP &= ~DP_LINK_TRAIN_MASK;
3963 		DP |= DP_LINK_TRAIN_PAT_IDLE;
3964 	}
3965 	I915_WRITE(intel_dp->output_reg, DP);
3966 	POSTING_READ(intel_dp->output_reg);
3967 
3968 	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3969 	I915_WRITE(intel_dp->output_reg, DP);
3970 	POSTING_READ(intel_dp->output_reg);
3971 
3972 	/*
3973 	 * HW workaround for IBX, we need to move the port
3974 	 * to transcoder A after disabling it to allow the
3975 	 * matching HDMI port to be enabled on transcoder A.
3976 	 */
3977 	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3978 		/* always enable with pattern 1 (as per spec) */
3979 		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3980 		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3981 		I915_WRITE(intel_dp->output_reg, DP);
3982 		POSTING_READ(intel_dp->output_reg);
3983 
3984 		DP &= ~DP_PORT_EN;
3985 		I915_WRITE(intel_dp->output_reg, DP);
3986 		POSTING_READ(intel_dp->output_reg);
3987 	}
3988 
3989 	msleep(intel_dp->panel_power_down_delay);
3990 }
3991 
3992 static bool
3993 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3994 {
3995 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3996 	struct drm_device *dev = dig_port->base.base.dev;
3997 	struct drm_i915_private *dev_priv = dev->dev_private;
3998 	uint8_t rev;
3999 
4000 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
4001 				    sizeof(intel_dp->dpcd)) < 0)
4002 		return false; /* aux transfer failed */
4003 
4004 #ifdef __DragonFly__
4005 	char dpcd_hex_dump[DP_RECEIVER_CAP_SIZE * 3];
4006 	DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(intel_dp->dpcd, sizeof(intel_dp->dpcd),
4007 		      dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
4008 #else
4009 	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4010 #endif
4011 
4012 	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
4013 		return false; /* DPCD not present */
4014 
4015 	/* Check if the panel supports PSR */
4016 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
4017 	if (is_edp(intel_dp)) {
4018 		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
4019 					intel_dp->psr_dpcd,
4020 					sizeof(intel_dp->psr_dpcd));
4021 		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
4022 			dev_priv->psr.sink_support = true;
4023 			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
4024 		}
4025 
4026 		if (INTEL_INFO(dev)->gen >= 9 &&
4027 			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
4028 			uint8_t frame_sync_cap;
4029 
4030 			dev_priv->psr.sink_support = true;
4031 			intel_dp_dpcd_read_wake(&intel_dp->aux,
4032 					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
4033 					&frame_sync_cap, 1);
4034 			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
4035 			/* PSR2 needs frame sync as well */
4036 			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink\n",
4038 				dev_priv->psr.psr2_support ? "supported" : "not supported");
4039 		}
4040 	}
4041 
4042 	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
4043 		      yesno(intel_dp_source_supports_hbr2(dev)),
4044 		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
4045 
4046 	/* Intermediate frequency support */
4047 	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDP 1.4 or higher */
4051 		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4052 		int i;
4053 
4054 		intel_dp_dpcd_read_wake(&intel_dp->aux,
4055 				DP_SUPPORTED_LINK_RATES,
4056 				sink_rates,
4057 				sizeof(sink_rates));
4058 
4059 		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4060 			int val = le16_to_cpu(sink_rates[i]);
4061 
4062 			if (val == 0)
4063 				break;
4064 
			/* The raw value is in units of 200 kHz, while drm
			 * stores link rates in deca-kHz (10 kHz) units */
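			/* For example, a raw value of 27000 (27000 * 200 kHz
			 * = 5.4 GHz, i.e. HBR2) is stored as
			 * (27000 * 200) / 10 = 540000 */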
4066 			intel_dp->sink_rates[i] = (val * 200) / 10;
4067 		}
4068 		intel_dp->num_sink_rates = i;
4069 	}
4070 
4071 	intel_dp_print_rates(intel_dp);
4072 
4073 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4074 	      DP_DWN_STRM_PORT_PRESENT))
4075 		return true; /* native DP sink */
4076 
4077 	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4078 		return true; /* no per-port downstream info */
4079 
4080 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4081 				    intel_dp->downstream_ports,
4082 				    DP_MAX_DOWNSTREAM_PORTS) < 0)
4083 		return false; /* downstream port status fetch failed */
4084 
4085 	return true;
4086 }
4087 
4088 static void
4089 intel_dp_probe_oui(struct intel_dp *intel_dp)
4090 {
4091 	u8 buf[3];
4092 
4093 	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4094 		return;
4095 
4096 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4097 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4098 			      buf[0], buf[1], buf[2]);
4099 
4100 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4101 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4102 			      buf[0], buf[1], buf[2]);
4103 }
4104 
4105 static bool
4106 intel_dp_probe_mst(struct intel_dp *intel_dp)
4107 {
4108 	u8 buf[1];
4109 
4110 	if (!intel_dp->can_mst)
4111 		return false;
4112 
4113 	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4114 		return false;
4115 
4116 	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4117 		if (buf[0] & DP_MST_CAP) {
4118 			DRM_DEBUG_KMS("Sink is MST capable\n");
4119 			intel_dp->is_mst = true;
4120 		} else {
4121 			DRM_DEBUG_KMS("Sink is not MST capable\n");
4122 			intel_dp->is_mst = false;
4123 		}
4124 	}
4125 
4126 #if 0
4127 	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4128 	return intel_dp->is_mst;
4129 #else
4130 	return false;
4131 #endif
4132 }
4133 
4134 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4135 {
4136 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4137 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4138 	u8 buf;
4139 	int ret = 0;
4140 
4141 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4142 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4143 		ret = -EIO;
4144 		goto out;
4145 	}
4146 
4147 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4148 			       buf & ~DP_TEST_SINK_START) < 0) {
4149 		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4150 		ret = -EIO;
4151 		goto out;
4152 	}
4153 
4154 	intel_dp->sink_crc.started = false;
4155  out:
4156 	hsw_enable_ips(intel_crtc);
4157 	return ret;
4158 }
4159 
4160 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4161 {
4162 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4163 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4164 	u8 buf;
4165 	int ret;
4166 
4167 	if (intel_dp->sink_crc.started) {
4168 		ret = intel_dp_sink_crc_stop(intel_dp);
4169 		if (ret)
4170 			return ret;
4171 	}
4172 
4173 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4174 		return -EIO;
4175 
4176 	if (!(buf & DP_TEST_CRC_SUPPORTED))
4177 		return -ENOTTY;
4178 
4179 	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4180 
4181 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4182 		return -EIO;
4183 
4184 	hsw_disable_ips(intel_crtc);
4185 
4186 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4187 			       buf | DP_TEST_SINK_START) < 0) {
4188 		hsw_enable_ips(intel_crtc);
4189 		return -EIO;
4190 	}
4191 
4192 	intel_dp->sink_crc.started = true;
4193 	return 0;
4194 }
4195 
4196 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4197 {
4198 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4199 	struct drm_device *dev = dig_port->base.base.dev;
4200 	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4201 	u8 buf;
4202 	int count, ret;
4203 	int attempts = 6;
4204 	bool old_equal_new;
4205 
4206 	ret = intel_dp_sink_crc_start(intel_dp);
4207 	if (ret)
4208 		return ret;
4209 
4210 	do {
4211 		intel_wait_for_vblank(dev, intel_crtc->pipe);
4212 
4213 		if (drm_dp_dpcd_readb(&intel_dp->aux,
4214 				      DP_TEST_SINK_MISC, &buf) < 0) {
4215 			ret = -EIO;
4216 			goto stop;
4217 		}
4218 		count = buf & DP_TEST_COUNT_MASK;
4219 
		/*
		 * Count might be reset during the loop. In this case the
		 * last known count needs to be reset as well.
		 */
4224 		if (count == 0)
4225 			intel_dp->sink_crc.last_count = 0;
4226 
4227 		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4228 			ret = -EIO;
4229 			goto stop;
4230 		}
4231 
4232 		old_equal_new = (count == intel_dp->sink_crc.last_count &&
4233 				 !memcmp(intel_dp->sink_crc.last_crc, crc,
4234 					 6 * sizeof(u8)));
4235 
4236 	} while (--attempts && (count == 0 || old_equal_new));
4237 
4238 	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4239 	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4240 
4241 	if (attempts == 0) {
4242 		if (old_equal_new) {
4243 			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4244 		} else {
4245 			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4246 			ret = -ETIMEDOUT;
4247 			goto stop;
4248 		}
4249 	}
4250 
4251 stop:
4252 	intel_dp_sink_crc_stop(intel_dp);
4253 	return ret;
4254 }
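
#if 0
/*
 * Hypothetical usage sketch for intel_dp_sink_crc(); not part of the
 * driver (the real caller lives in the debugfs CRC code). It fetches one
 * 6-byte CRC computed by the sink and prints it.
 */
static void example_dump_sink_crc(struct intel_dp *intel_dp)
{
	u8 crc[6];

	if (intel_dp_sink_crc(intel_dp, crc) == 0)
		DRM_DEBUG_KMS("sink CRC: %02x%02x%02x%02x%02x%02x\n",
			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
}
#endif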
4255 
4256 static bool
4257 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4258 {
4259 	return intel_dp_dpcd_read_wake(&intel_dp->aux,
4260 				       DP_DEVICE_SERVICE_IRQ_VECTOR,
4261 				       sink_irq_vector, 1) == 1;
4262 }
4263 
4264 #if 0
4265 static bool
4266 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4267 {
4268 	int ret;
4269 
4270 	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4271 					     DP_SINK_COUNT_ESI,
4272 					     sink_irq_vector, 14);
4273 	if (ret != 14)
4274 		return false;
4275 
4276 	return true;
4277 }
4278 #endif
4279 
4280 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4281 {
4282 	uint8_t test_result = DP_TEST_ACK;
4283 	return test_result;
4284 }
4285 
4286 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4287 {
4288 	uint8_t test_result = DP_TEST_NAK;
4289 	return test_result;
4290 }
4291 
4292 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4293 {
4294 	uint8_t test_result = DP_TEST_NAK;
4295 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4296 	struct drm_connector *connector = &intel_connector->base;
4297 
4298 	if (intel_connector->detect_edid == NULL ||
4299 	    connector->edid_corrupt ||
4300 	    intel_dp->aux.i2c_defer_count > 6) {
4301 		/* Check EDID read for NACKs, DEFERs and corruption
4302 		 * (DP CTS 1.2 Core r1.1)
4303 		 *    4.2.2.4 : Failed EDID read, I2C_NAK
4304 		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
4305 		 *    4.2.2.6 : EDID corruption detected
4306 		 * Use failsafe mode for all cases
4307 		 */
4308 		if (intel_dp->aux.i2c_nack_count > 0 ||
4309 			intel_dp->aux.i2c_defer_count > 0)
4310 			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4311 				      intel_dp->aux.i2c_nack_count,
4312 				      intel_dp->aux.i2c_defer_count);
4313 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4314 	} else {
4315 		struct edid *block = intel_connector->detect_edid;
4316 
4317 		/* We have to write the checksum
4318 		 * of the last block read
4319 		 */
4320 		block += intel_connector->detect_edid->extensions;
4321 
4322 		if (!drm_dp_dpcd_write(&intel_dp->aux,
4323 					DP_TEST_EDID_CHECKSUM,
4324 					&block->checksum,
4325 					1))
4326 			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4327 
4328 		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4329 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4330 	}
4331 
4332 	/* Set test active flag here so userspace doesn't interrupt things */
4333 	intel_dp->compliance_test_active = 1;
4334 
4335 	return test_result;
4336 }
4337 
4338 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4339 {
4340 	uint8_t test_result = DP_TEST_NAK;
4341 	return test_result;
4342 }
4343 
4344 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4345 {
4346 	uint8_t response = DP_TEST_NAK;
4347 	uint8_t rxdata = 0;
4348 	int status = 0;
4349 
4350 	intel_dp->compliance_test_active = 0;
4351 	intel_dp->compliance_test_type = 0;
4352 	intel_dp->compliance_test_data = 0;
4353 
4354 	intel_dp->aux.i2c_nack_count = 0;
4355 	intel_dp->aux.i2c_defer_count = 0;
4356 
4357 	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4358 	if (status <= 0) {
4359 		DRM_DEBUG_KMS("Could not read test request from sink\n");
4360 		goto update_status;
4361 	}
4362 
4363 	switch (rxdata) {
4364 	case DP_TEST_LINK_TRAINING:
4365 		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4366 		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4367 		response = intel_dp_autotest_link_training(intel_dp);
4368 		break;
4369 	case DP_TEST_LINK_VIDEO_PATTERN:
4370 		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4371 		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4372 		response = intel_dp_autotest_video_pattern(intel_dp);
4373 		break;
4374 	case DP_TEST_LINK_EDID_READ:
4375 		DRM_DEBUG_KMS("EDID test requested\n");
4376 		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4377 		response = intel_dp_autotest_edid(intel_dp);
4378 		break;
4379 	case DP_TEST_LINK_PHY_TEST_PATTERN:
4380 		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4381 		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4382 		response = intel_dp_autotest_phy_pattern(intel_dp);
4383 		break;
4384 	default:
4385 		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4386 		break;
4387 	}
4388 
4389 update_status:
4390 	status = drm_dp_dpcd_write(&intel_dp->aux,
4391 				   DP_TEST_RESPONSE,
4392 				   &response, 1);
4393 	if (status <= 0)
4394 		DRM_DEBUG_KMS("Could not write test response to sink\n");
4395 }
4396 
4397 #if 0
4398 static int
4399 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4400 {
4401 	bool bret;
4402 
4403 	if (intel_dp->is_mst) {
4404 		u8 esi[16] = { 0 };
4405 		int ret = 0;
4406 		int retry;
4407 		bool handled;
4408 		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4409 go_again:
4410 		if (bret == true) {
4411 
4412 			/* check link status - esi[10] = 0x200c */
4413 			if (intel_dp->active_mst_links &&
4414 			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4415 				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4416 				intel_dp_start_link_train(intel_dp);
4417 				intel_dp_stop_link_train(intel_dp);
4418 			}
4419 
4420 			DRM_DEBUG_KMS("got esi %3ph\n", esi);
4421 			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4422 
4423 			if (handled) {
4424 				for (retry = 0; retry < 3; retry++) {
4425 					int wret;
4426 					wret = drm_dp_dpcd_write(&intel_dp->aux,
4427 								 DP_SINK_COUNT_ESI+1,
4428 								 &esi[1], 3);
4429 					if (wret == 3) {
4430 						break;
4431 					}
4432 				}
4433 
4434 				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4435 				if (bret == true) {
4436 					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4437 					goto go_again;
4438 				}
4439 			} else
4440 				ret = 0;
4441 
4442 			return ret;
4443 		} else {
4444 			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4445 			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4446 			intel_dp->is_mst = false;
4447 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4448 			/* send a hotplug event */
4449 			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4450 		}
4451 	}
4452 	return -EINVAL;
4453 }
4454 #endif
4455 
4456 /*
4457  * According to DP spec
4458  * 5.1.2:
4459  *  1. Read DPCD
4460  *  2. Configure link according to Receiver Capabilities
4461  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4462  *  4. Check link status on receipt of hot-plug interrupt
4463  */
4464 static void
4465 intel_dp_check_link_status(struct intel_dp *intel_dp)
4466 {
4467 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4468 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4469 	u8 sink_irq_vector;
4470 	u8 link_status[DP_LINK_STATUS_SIZE];
4471 
4472 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4473 
4474 	if (!intel_encoder->base.crtc)
4475 		return;
4476 
4477 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4478 		return;
4479 
4480 	/* Try to read receiver status if the link appears to be up */
4481 	if (!intel_dp_get_link_status(intel_dp, link_status)) {
4482 		return;
4483 	}
4484 
4485 	/* Now read the DPCD to see if it's actually running */
4486 	if (!intel_dp_get_dpcd(intel_dp)) {
4487 		return;
4488 	}
4489 
4490 	/* Try to read the source of the interrupt */
4491 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4492 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4493 		/* Clear interrupt source */
4494 		drm_dp_dpcd_writeb(&intel_dp->aux,
4495 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4496 				   sink_irq_vector);
4497 
4498 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4499 			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4500 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4501 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4502 	}
4503 
4504 	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4505 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4506 			      intel_encoder->base.name);
4507 		intel_dp_start_link_train(intel_dp);
4508 		intel_dp_stop_link_train(intel_dp);
4509 	}
4510 }
4511 
4512 /* XXX this is probably wrong for multiple downstream ports */
4513 static enum drm_connector_status
4514 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4515 {
4516 	uint8_t *dpcd = intel_dp->dpcd;
4517 	uint8_t type;
4518 
4519 	if (!intel_dp_get_dpcd(intel_dp))
4520 		return connector_status_disconnected;
4521 
4522 	/* if there's no downstream port, we're done */
4523 	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4524 		return connector_status_connected;
4525 
4526 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4527 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4528 	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4529 		uint8_t reg;
4530 
4531 		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4532 					    &reg, 1) < 0)
4533 			return connector_status_unknown;
4534 
4535 		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4536 					      : connector_status_disconnected;
4537 	}
4538 
4539 	/* If no HPD, poke DDC gently */
4540 	if (drm_probe_ddc(&intel_dp->aux.ddc))
4541 		return connector_status_connected;
4542 
4543 	/* Well we tried, say unknown for unreliable port types */
4544 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4545 		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4546 		if (type == DP_DS_PORT_TYPE_VGA ||
4547 		    type == DP_DS_PORT_TYPE_NON_EDID)
4548 			return connector_status_unknown;
4549 	} else {
4550 		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4551 			DP_DWN_STRM_PORT_TYPE_MASK;
4552 		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4553 		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
4554 			return connector_status_unknown;
4555 	}
4556 
4557 	/* Anything else is out of spec, warn and ignore */
4558 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4559 	return connector_status_disconnected;
4560 }
4561 
4562 static enum drm_connector_status
4563 edp_detect(struct intel_dp *intel_dp)
4564 {
4565 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4566 	enum drm_connector_status status;
4567 
4568 	status = intel_panel_detect(dev);
4569 	if (status == connector_status_unknown)
4570 		status = connector_status_connected;
4571 
4572 	return status;
4573 }
4574 
4575 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4576 				       struct intel_digital_port *port)
4577 {
4578 	u32 bit;
4579 
4580 	switch (port->port) {
4581 	case PORT_A:
4582 		return true;
4583 	case PORT_B:
4584 		bit = SDE_PORTB_HOTPLUG;
4585 		break;
4586 	case PORT_C:
4587 		bit = SDE_PORTC_HOTPLUG;
4588 		break;
4589 	case PORT_D:
4590 		bit = SDE_PORTD_HOTPLUG;
4591 		break;
4592 	default:
4593 		MISSING_CASE(port->port);
4594 		return false;
4595 	}
4596 
4597 	return I915_READ(SDEISR) & bit;
4598 }
4599 
4600 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4601 				       struct intel_digital_port *port)
4602 {
4603 	u32 bit;
4604 
4605 	switch (port->port) {
4606 	case PORT_A:
4607 		return true;
4608 	case PORT_B:
4609 		bit = SDE_PORTB_HOTPLUG_CPT;
4610 		break;
4611 	case PORT_C:
4612 		bit = SDE_PORTC_HOTPLUG_CPT;
4613 		break;
4614 	case PORT_D:
4615 		bit = SDE_PORTD_HOTPLUG_CPT;
4616 		break;
4617 	case PORT_E:
4618 		bit = SDE_PORTE_HOTPLUG_SPT;
4619 		break;
4620 	default:
4621 		MISSING_CASE(port->port);
4622 		return false;
4623 	}
4624 
4625 	return I915_READ(SDEISR) & bit;
4626 }
4627 
4628 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4629 				       struct intel_digital_port *port)
4630 {
4631 	u32 bit;
4632 
4633 	switch (port->port) {
4634 	case PORT_B:
4635 		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4636 		break;
4637 	case PORT_C:
4638 		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4639 		break;
4640 	case PORT_D:
4641 		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4642 		break;
4643 	default:
4644 		MISSING_CASE(port->port);
4645 		return false;
4646 	}
4647 
4648 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4649 }
4650 
4651 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4652 				       struct intel_digital_port *port)
4653 {
4654 	u32 bit;
4655 
4656 	switch (port->port) {
4657 	case PORT_B:
4658 		bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4659 		break;
4660 	case PORT_C:
4661 		bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4662 		break;
4663 	case PORT_D:
4664 		bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4665 		break;
4666 	default:
4667 		MISSING_CASE(port->port);
4668 		return false;
4669 	}
4670 
4671 	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4672 }
4673 
4674 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4675 				       struct intel_digital_port *intel_dig_port)
4676 {
4677 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4678 	enum port port;
4679 	u32 bit;
4680 
4681 	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4682 	switch (port) {
4683 	case PORT_A:
4684 		bit = BXT_DE_PORT_HP_DDIA;
4685 		break;
4686 	case PORT_B:
4687 		bit = BXT_DE_PORT_HP_DDIB;
4688 		break;
4689 	case PORT_C:
4690 		bit = BXT_DE_PORT_HP_DDIC;
4691 		break;
4692 	default:
4693 		MISSING_CASE(port);
4694 		return false;
4695 	}
4696 
4697 	return I915_READ(GEN8_DE_PORT_ISR) & bit;
4698 }
4699 
4700 /*
4701  * intel_digital_port_connected - is the specified port connected?
4702  * @dev_priv: i915 private structure
4703  * @port: the port to test
4704  *
4705  * Return %true if @port is connected, %false otherwise.
4706  */
4707 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4708 					 struct intel_digital_port *port)
4709 {
4710 	if (HAS_PCH_IBX(dev_priv))
4711 		return ibx_digital_port_connected(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
4713 		return cpt_digital_port_connected(dev_priv, port);
4714 	else if (IS_BROXTON(dev_priv))
4715 		return bxt_digital_port_connected(dev_priv, port);
4716 	else if (IS_VALLEYVIEW(dev_priv))
4717 		return vlv_digital_port_connected(dev_priv, port);
4718 	else
4719 		return g4x_digital_port_connected(dev_priv, port);
4720 }
4721 
4722 static enum drm_connector_status
4723 ironlake_dp_detect(struct intel_dp *intel_dp)
4724 {
4725 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4726 	struct drm_i915_private *dev_priv = dev->dev_private;
4727 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4728 
4729 	if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4730 		return connector_status_disconnected;
4731 
4732 	return intel_dp_detect_dpcd(intel_dp);
4733 }
4734 
4735 static enum drm_connector_status
4736 g4x_dp_detect(struct intel_dp *intel_dp)
4737 {
4738 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4739 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4740 
4741 	/* Can't disconnect eDP, but you can close the lid... */
4742 	if (is_edp(intel_dp)) {
4743 		enum drm_connector_status status;
4744 
4745 		status = intel_panel_detect(dev);
4746 		if (status == connector_status_unknown)
4747 			status = connector_status_connected;
4748 		return status;
4749 	}
4750 
4751 	if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4752 		return connector_status_disconnected;
4753 
4754 	return intel_dp_detect_dpcd(intel_dp);
4755 }
4756 
4757 static struct edid *
4758 intel_dp_get_edid(struct intel_dp *intel_dp)
4759 {
4760 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4761 
4762 	/* use cached edid if we have one */
4763 	if (intel_connector->edid) {
4764 		/* invalid edid */
4765 		if (IS_ERR(intel_connector->edid))
4766 			return NULL;
4767 
4768 		return drm_edid_duplicate(intel_connector->edid);
4769 	} else
4770 		return drm_get_edid(&intel_connector->base,
4771 				    &intel_dp->aux.ddc);
4772 }
4773 
4774 static void
4775 intel_dp_set_edid(struct intel_dp *intel_dp)
4776 {
4777 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4778 	struct edid *edid;
4779 
4780 	edid = intel_dp_get_edid(intel_dp);
4781 	intel_connector->detect_edid = edid;
4782 
4783 	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4784 		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4785 	else
4786 		intel_dp->has_audio = drm_detect_monitor_audio(edid);
4787 }
4788 
4789 static void
4790 intel_dp_unset_edid(struct intel_dp *intel_dp)
4791 {
4792 	struct intel_connector *intel_connector = intel_dp->attached_connector;
4793 
4794 	kfree(intel_connector->detect_edid);
4795 	intel_connector->detect_edid = NULL;
4796 
4797 	intel_dp->has_audio = false;
4798 }
4799 
4800 static enum drm_connector_status
4801 intel_dp_detect(struct drm_connector *connector, bool force)
4802 {
4803 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4804 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4805 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4806 	struct drm_device *dev = connector->dev;
4807 	enum drm_connector_status status;
4808 	enum intel_display_power_domain power_domain;
4809 	bool ret;
4810 	u8 sink_irq_vector;
4811 
4812 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4813 		      connector->base.id, connector->name);
4814 	intel_dp_unset_edid(intel_dp);
4815 
4816 	if (intel_dp->is_mst) {
4817 		/* MST devices are disconnected from a monitor POV */
4818 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4819 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4820 		return connector_status_disconnected;
4821 	}
4822 
4823 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
4824 	intel_display_power_get(to_i915(dev), power_domain);
4825 
4826 	/* Can't disconnect eDP, but you can close the lid... */
4827 	if (is_edp(intel_dp))
4828 		status = edp_detect(intel_dp);
4829 	else if (HAS_PCH_SPLIT(dev))
4830 		status = ironlake_dp_detect(intel_dp);
4831 	else
4832 		status = g4x_dp_detect(intel_dp);
4833 	if (status != connector_status_connected)
4834 		goto out;
4835 
4836 	intel_dp_probe_oui(intel_dp);
4837 
4838 	ret = intel_dp_probe_mst(intel_dp);
4839 	if (ret) {
4840 		/* if we are in MST mode then this connector
4841 		   won't appear connected or have anything with EDID on it */
4842 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
4843 			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4844 		status = connector_status_disconnected;
4845 		goto out;
4846 	}
4847 
4848 	intel_dp_set_edid(intel_dp);
4849 
4850 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4851 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4852 	status = connector_status_connected;
4853 
4854 	/* Try to read the source of the interrupt */
4855 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4856 	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4857 		/* Clear interrupt source */
4858 		drm_dp_dpcd_writeb(&intel_dp->aux,
4859 				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4860 				   sink_irq_vector);
4861 
4862 		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4863 			intel_dp_handle_test_request(intel_dp);
4864 		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4865 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4866 	}
4867 
4868 out:
4869 	intel_display_power_put(to_i915(dev), power_domain);
4870 	return status;
4871 }
4872 
4873 static void
4874 intel_dp_force(struct drm_connector *connector)
4875 {
4876 	struct intel_dp *intel_dp = intel_attached_dp(connector);
4877 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4878 	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4879 	enum intel_display_power_domain power_domain;
4880 
4881 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4882 		      connector->base.id, connector->name);
4883 	intel_dp_unset_edid(intel_dp);
4884 
4885 	if (connector->status != connector_status_connected)
4886 		return;
4887 
4888 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
4889 	intel_display_power_get(dev_priv, power_domain);
4890 
4891 	intel_dp_set_edid(intel_dp);
4892 
4893 	intel_display_power_put(dev_priv, power_domain);
4894 
4895 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4896 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4897 }
4898 
4899 static int intel_dp_get_modes(struct drm_connector *connector)
4900 {
4901 	struct intel_connector *intel_connector = to_intel_connector(connector);
4902 	struct edid *edid;
4903 
4904 	edid = intel_connector->detect_edid;
4905 	if (edid) {
4906 		int ret = intel_connector_update_modes(connector, edid);
4907 		if (ret)
4908 			return ret;
4909 	}
4910 
4911 	/* if eDP has no EDID, fall back to fixed mode */
4912 	if (is_edp(intel_attached_dp(connector)) &&
4913 	    intel_connector->panel.fixed_mode) {
4914 		struct drm_display_mode *mode;
4915 
4916 		mode = drm_mode_duplicate(connector->dev,
4917 					  intel_connector->panel.fixed_mode);
4918 		if (mode) {
4919 			drm_mode_probed_add(connector, mode);
4920 			return 1;
4921 		}
4922 	}
4923 
4924 	return 0;
4925 }
4926 
4927 static bool
4928 intel_dp_detect_audio(struct drm_connector *connector)
4929 {
4930 	bool has_audio = false;
4931 	struct edid *edid;
4932 
4933 	edid = to_intel_connector(connector)->detect_edid;
4934 	if (edid)
4935 		has_audio = drm_detect_monitor_audio(edid);
4936 
4937 	return has_audio;
4938 }
4939 
4940 static int
4941 intel_dp_set_property(struct drm_connector *connector,
4942 		      struct drm_property *property,
4943 		      uint64_t val)
4944 {
4945 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
4946 	struct intel_connector *intel_connector = to_intel_connector(connector);
4947 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4948 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4949 	int ret;
4950 
4951 	ret = drm_object_property_set_value(&connector->base, property, val);
4952 	if (ret)
4953 		return ret;
4954 
4955 	if (property == dev_priv->force_audio_property) {
4956 		int i = val;
4957 		bool has_audio;
4958 
4959 		if (i == intel_dp->force_audio)
4960 			return 0;
4961 
4962 		intel_dp->force_audio = i;
4963 
4964 		if (i == HDMI_AUDIO_AUTO)
4965 			has_audio = intel_dp_detect_audio(connector);
4966 		else
4967 			has_audio = (i == HDMI_AUDIO_ON);
4968 
4969 		if (has_audio == intel_dp->has_audio)
4970 			return 0;
4971 
4972 		intel_dp->has_audio = has_audio;
4973 		goto done;
4974 	}
4975 
4976 	if (property == dev_priv->broadcast_rgb_property) {
4977 		bool old_auto = intel_dp->color_range_auto;
4978 		bool old_range = intel_dp->limited_color_range;
4979 
4980 		switch (val) {
4981 		case INTEL_BROADCAST_RGB_AUTO:
4982 			intel_dp->color_range_auto = true;
4983 			break;
4984 		case INTEL_BROADCAST_RGB_FULL:
4985 			intel_dp->color_range_auto = false;
4986 			intel_dp->limited_color_range = false;
4987 			break;
4988 		case INTEL_BROADCAST_RGB_LIMITED:
4989 			intel_dp->color_range_auto = false;
4990 			intel_dp->limited_color_range = true;
4991 			break;
4992 		default:
4993 			return -EINVAL;
4994 		}
4995 
4996 		if (old_auto == intel_dp->color_range_auto &&
4997 		    old_range == intel_dp->limited_color_range)
4998 			return 0;
4999 
5000 		goto done;
5001 	}
5002 
5003 	if (is_edp(intel_dp) &&
5004 	    property == connector->dev->mode_config.scaling_mode_property) {
5005 		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("scaling mode NONE not supported\n");
5007 			return -EINVAL;
5008 		}
5009 
5010 		if (intel_connector->panel.fitting_mode == val) {
5011 			/* the eDP scaling property is not changed */
5012 			return 0;
5013 		}
5014 		intel_connector->panel.fitting_mode = val;
5015 
5016 		goto done;
5017 	}
5018 
5019 	return -EINVAL;
5020 
5021 done:
5022 	if (intel_encoder->base.crtc)
5023 		intel_crtc_restore_mode(intel_encoder->base.crtc);
5024 
5025 	return 0;
5026 }
5027 
5028 static void
5029 intel_dp_connector_destroy(struct drm_connector *connector)
5030 {
5031 	struct intel_connector *intel_connector = to_intel_connector(connector);
5032 
5033 	kfree(intel_connector->detect_edid);
5034 
5035 	if (!IS_ERR_OR_NULL(intel_connector->edid))
5036 		kfree(intel_connector->edid);
5037 
5038 	/* Can't call is_edp() since the encoder may have been destroyed
5039 	 * already. */
5040 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5041 		intel_panel_fini(&intel_connector->panel);
5042 
5043 	drm_connector_cleanup(connector);
5044 	kfree(connector);
5045 }
5046 
5047 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5048 {
5049 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5050 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5051 
5052 	drm_dp_aux_unregister(&intel_dp->aux);
5053 	intel_dp_mst_encoder_cleanup(intel_dig_port);
5054 	if (is_edp(intel_dp)) {
5055 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5056 		/*
		 * vdd might still be enabled due to the delayed vdd off.
5058 		 * Make sure vdd is actually turned off here.
5059 		 */
5060 		pps_lock(intel_dp);
5061 		edp_panel_vdd_off_sync(intel_dp);
5062 		pps_unlock(intel_dp);
5063 
5064 #if 0
5065 		if (intel_dp->edp_notifier.notifier_call) {
5066 			unregister_reboot_notifier(&intel_dp->edp_notifier);
5067 			intel_dp->edp_notifier.notifier_call = NULL;
5068 		}
5069 #endif
5070 	}
5071 	drm_encoder_cleanup(encoder);
5072 	kfree(intel_dig_port);
5073 }
5074 
5075 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5076 {
5077 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5078 
5079 	if (!is_edp(intel_dp))
5080 		return;
5081 
5082 	/*
	 * vdd might still be enabled due to the delayed vdd off.
5084 	 * Make sure vdd is actually turned off here.
5085 	 */
5086 	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5087 	pps_lock(intel_dp);
5088 	edp_panel_vdd_off_sync(intel_dp);
5089 	pps_unlock(intel_dp);
5090 }
5091 
5092 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5093 {
5094 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5095 	struct drm_device *dev = intel_dig_port->base.base.dev;
5096 	struct drm_i915_private *dev_priv = dev->dev_private;
5097 	enum intel_display_power_domain power_domain;
5098 
5099 	lockdep_assert_held(&dev_priv->pps_mutex);
5100 
5101 	if (!edp_have_panel_vdd(intel_dp))
5102 		return;
5103 
5104 	/*
5105 	 * The VDD bit needs a power domain reference, so if the bit is
5106 	 * already enabled when we boot or resume, grab this reference and
5107 	 * schedule a vdd off, so we don't hold on to the reference
5108 	 * indefinitely.
5109 	 */
5110 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5111 	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
5112 	intel_display_power_get(dev_priv, power_domain);
5113 
5114 	edp_panel_vdd_schedule_off(intel_dp);
5115 }
5116 
5117 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5118 {
5119 	struct intel_dp *intel_dp;
5120 
5121 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5122 		return;
5123 
5124 	intel_dp = enc_to_intel_dp(encoder);
5125 
5126 	pps_lock(intel_dp);
5127 
5128 	/*
5129 	 * Read out the current power sequencer assignment,
5130 	 * in case the BIOS did something with it.
5131 	 */
5132 	if (IS_VALLEYVIEW(encoder->dev))
5133 		vlv_initial_power_sequencer_setup(intel_dp);
5134 
5135 	intel_edp_panel_vdd_sanitize(intel_dp);
5136 
5137 	pps_unlock(intel_dp);
5138 }
5139 
5140 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5141 	.dpms = drm_atomic_helper_connector_dpms,
5142 	.detect = intel_dp_detect,
5143 	.force = intel_dp_force,
5144 	.fill_modes = drm_helper_probe_single_connector_modes,
5145 	.set_property = intel_dp_set_property,
5146 	.atomic_get_property = intel_connector_atomic_get_property,
5147 	.destroy = intel_dp_connector_destroy,
5148 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5149 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5150 };
5151 
5152 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5153 	.get_modes = intel_dp_get_modes,
5154 	.mode_valid = intel_dp_mode_valid,
5155 	.best_encoder = intel_best_encoder,
5156 };
5157 
5158 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5159 	.reset = intel_dp_encoder_reset,
5160 	.destroy = intel_dp_encoder_destroy,
5161 };
5162 
5163 bool
5164 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5165 {
5166 	struct intel_dp *intel_dp = &intel_dig_port->dp;
5167 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5168 	struct drm_device *dev = intel_dig_port->base.base.dev;
5169 	struct drm_i915_private *dev_priv = dev->dev_private;
5170 	enum intel_display_power_domain power_domain;
5171 	bool ret = true;
5172 
5173 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5174 	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
5175 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5176 
5177 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5178 		/*
5179 		 * vdd off can generate a long pulse on eDP which
5180 		 * would require vdd on to handle it, and thus we
5181 		 * would end up in an endless cycle of
5182 		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5183 		 */
5184 		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5185 			      port_name(intel_dig_port->port));
5186 		return false;
5187 	}
5188 
5189 	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5190 		      port_name(intel_dig_port->port),
5191 		      long_hpd ? "long" : "short");
5192 
5193 	power_domain = intel_display_port_aux_power_domain(intel_encoder);
5194 	intel_display_power_get(dev_priv, power_domain);
5195 
5196 	if (long_hpd) {
5197 		/* indicate that we need to restart link training */
5198 		intel_dp->train_set_valid = false;
5199 
5200 		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5201 			goto mst_fail;
5202 
5203 		if (!intel_dp_get_dpcd(intel_dp)) {
5204 			goto mst_fail;
5205 		}
5206 
5207 		intel_dp_probe_oui(intel_dp);
5208 
5209 		if (!intel_dp_probe_mst(intel_dp)) {
5210 			goto mst_fail;
5211 		}
5212 	} else {
5213 		if (intel_dp->is_mst) {
5214 #if 0
5215 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5216 				goto mst_fail;
5217 #endif
5218 		}
5219 
5220 		if (!intel_dp->is_mst) {
5221 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5222 			intel_dp_check_link_status(intel_dp);
5223 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
5224 		}
5225 	}
5226 
5227 	ret = false;
5228 
5229 	goto put_power;
5230 mst_fail:
5231 	/* If we were in MST mode and the device is no longer there, get out of MST mode */
5232 	if (intel_dp->is_mst) {
5233 		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5234 		intel_dp->is_mst = false;
5235 #if 0
5236 		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5237 #endif
5238 	}
5239 put_power:
5240 	intel_display_power_put(dev_priv, power_domain);
5241 
5242 	return ret;
5243 }
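
/*
 * A hedged sketch of how the hotplug code is expected to reach this hook
 * through the irq_port[] wiring done in intel_dp_init() below (the caller
 * shape is assumed for illustration); a true return means the pulse could
 * not be fully handled here:
 *
 *	struct intel_digital_port *dig_port =
 *		dev_priv->hotplug.irq_port[port];
 *
 *	if (dig_port && dig_port->hpd_pulse(dig_port, long_hpd))
 *		...fall back to full connector reprobing...
 */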
5244 
5245 /* Return which DP Port should be selected for Transcoder DP control */
5246 int
5247 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5248 {
5249 	struct drm_device *dev = crtc->dev;
5250 	struct intel_encoder *intel_encoder;
5251 	struct intel_dp *intel_dp;
5252 
5253 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5254 		intel_dp = enc_to_intel_dp(&intel_encoder->base);
5255 
5256 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5257 		    intel_encoder->type == INTEL_OUTPUT_EDP)
5258 			return intel_dp->output_reg;
5259 	}
5260 
5261 	return -1;
5262 }
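
/*
 * Illustrative use (hypothetical caller): the return value is either the
 * DP output register of the encoder driving the crtc, or -1 when no
 * DP/eDP encoder is attached:
 *
 *	int dp_reg = intel_trans_dp_port_sel(crtc);
 *
 *	if (dp_reg != -1)
 *		...program the transcoder DP port select from dp_reg...
 */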
5263 
5264 /* check the VBT to see whether the eDP is on another port */
5265 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5266 {
5267 	struct drm_i915_private *dev_priv = dev->dev_private;
5268 	union child_device_config *p_child;
5269 	int i;
5270 	static const short port_mapping[] = {
5271 		[PORT_B] = DVO_PORT_DPB,
5272 		[PORT_C] = DVO_PORT_DPC,
5273 		[PORT_D] = DVO_PORT_DPD,
5274 		[PORT_E] = DVO_PORT_DPE,
5275 	};
5276 
5277 	/*
5278 	 * eDP is not supported on g4x, so bail out early just
5279 	 * for a bit of extra safety in case the VBT is bonkers.
5280 	 */
5281 	if (INTEL_INFO(dev)->gen < 5)
5282 		return false;
5283 
5284 	if (port == PORT_A)
5285 		return true;
5286 
5287 	if (!dev_priv->vbt.child_dev_num)
5288 		return false;
5289 
5290 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5291 		p_child = dev_priv->vbt.child_dev + i;
5292 
5293 		if (p_child->common.dvo_port == port_mapping[port] &&
5294 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5295 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5296 			return true;
5297 	}
5298 	return false;
5299 }
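
/*
 * For example (VBT contents assumed for illustration): a child device
 * with common.dvo_port == DVO_PORT_DPC and the eDP bits set in
 * common.device_type makes intel_dp_is_edp(dev, PORT_C) return true,
 * while PORT_A reports eDP unconditionally on gen5+.
 */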
5300 
5301 void
5302 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5303 {
5304 	struct intel_connector *intel_connector = to_intel_connector(connector);
5305 
5306 	intel_attach_force_audio_property(connector);
5307 	intel_attach_broadcast_rgb_property(connector);
5308 	intel_dp->color_range_auto = true;
5309 
5310 	if (is_edp(intel_dp)) {
5311 		drm_mode_create_scaling_mode_property(connector->dev);
5312 		drm_object_attach_property(
5313 			&connector->base,
5314 			connector->dev->mode_config.scaling_mode_property,
5315 			DRM_MODE_SCALE_ASPECT);
5316 		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5317 	}
5318 }
5319 
5320 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5321 {
5322 	intel_dp->last_power_cycle = jiffies;
5323 	intel_dp->last_power_on = jiffies;
5324 	intel_dp->last_backlight_off = jiffies;
5325 }
5326 
5327 static void
5328 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5329 				    struct intel_dp *intel_dp)
5330 {
5331 	struct drm_i915_private *dev_priv = dev->dev_private;
5332 	struct edp_power_seq cur, vbt, spec,
5333 		*final = &intel_dp->pps_delays;
5334 	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5335 	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5336 
5337 	lockdep_assert_held(&dev_priv->pps_mutex);
5338 
5339 	/* already initialized? */
5340 	if (final->t11_t12 != 0)
5341 		return;
5342 
5343 	if (IS_BROXTON(dev)) {
5344 		/*
5345 		 * TODO: BXT has 2 sets of PPS registers.
5346 	 * The correct register set for Broxton needs to be identified
5347 	 * using the VBT; hardcoding for now.
5348 		 */
5349 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5350 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5351 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5352 	} else if (HAS_PCH_SPLIT(dev)) {
5353 		pp_ctrl_reg = PCH_PP_CONTROL;
5354 		pp_on_reg = PCH_PP_ON_DELAYS;
5355 		pp_off_reg = PCH_PP_OFF_DELAYS;
5356 		pp_div_reg = PCH_PP_DIVISOR;
5357 	} else {
5358 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5359 
5360 		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5361 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5362 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5363 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5364 	}
5365 
5366 	/* Workaround: Need to write PP_CONTROL with the unlock key as
5367 	 * the very first thing. */
5368 	pp_ctl = ironlake_get_pp_control(intel_dp);
5369 
5370 	pp_on = I915_READ(pp_on_reg);
5371 	pp_off = I915_READ(pp_off_reg);
5372 	if (!IS_BROXTON(dev)) {
5373 		I915_WRITE(pp_ctrl_reg, pp_ctl);
5374 		pp_div = I915_READ(pp_div_reg);
5375 	}
5376 
5377 	/* Pull timing values out of registers */
5378 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5379 		PANEL_POWER_UP_DELAY_SHIFT;
5380 
5381 	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5382 		PANEL_LIGHT_ON_DELAY_SHIFT;
5383 
5384 	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5385 		PANEL_LIGHT_OFF_DELAY_SHIFT;
5386 
5387 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5388 		PANEL_POWER_DOWN_DELAY_SHIFT;
5389 
5390 	if (IS_BROXTON(dev)) {
5391 		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5392 			BXT_POWER_CYCLE_DELAY_SHIFT;
5393 		if (tmp > 0)
5394 			cur.t11_t12 = (tmp - 1) * 1000;
5395 		else
5396 			cur.t11_t12 = 0;
5397 	} else {
5398 		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5399 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5400 	}
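	/*
	 * Worked example (field value assumed for illustration): a BXT
	 * power-cycle field of 7 decodes to (7 - 1) * 1000 = 6000 units
	 * of 100us, i.e. a 600 ms t11_t12; a zero field stays zero.
	 */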
5401 
5402 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5403 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5404 
5405 	vbt = dev_priv->vbt.edp_pps;
5406 
5407 	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5408 	 * our hw here, which are all in 100usec. */
5409 	spec.t1_t3 = 210 * 10;
5410 	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5411 	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5412 	spec.t10 = 500 * 10;
5413 	/* This one is special: the hw field is in units of 100 ms and is
5414 	 * zero based (so 100 ms must be added). The sw vbt table also
5415 	 * multiplies it by 1000 so that it, too, ends up in units of
5416 	 * 100 usec. */
5417 	spec.t11_t12 = (510 + 100) * 10;
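	/*
	 * Worked example: (510 + 100) * 10 = 6100 units of 100us, i.e. the
	 * effective spec limit for t11_t12 is 610 ms once the zero-based
	 * 100 ms hw offset is added.
	 */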
5418 
5419 	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5420 		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5421 
5422 	/* Use the max of the register settings and vbt. If both are
5423 	 * unset, fall back to the spec limits. */
5424 #define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
5425 				       spec.field : \
5426 				       max(cur.field, vbt.field))
5427 	assign_final(t1_t3);
5428 	assign_final(t8);
5429 	assign_final(t9);
5430 	assign_final(t10);
5431 	assign_final(t11_t12);
5432 #undef assign_final
5433 
5434 #define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
5435 	intel_dp->panel_power_up_delay = get_delay(t1_t3);
5436 	intel_dp->backlight_on_delay = get_delay(t8);
5437 	intel_dp->backlight_off_delay = get_delay(t9);
5438 	intel_dp->panel_power_down_delay = get_delay(t10);
5439 	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5440 #undef get_delay
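	/*
	 * Worked example (illustrative value): a final t1_t3 of 2100 hw
	 * units is 2100 * 100us = 210 ms, and DIV_ROUND_UP(2100, 10) = 210,
	 * so the delays above are stored in milliseconds.
	 */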
5441 
5442 	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5443 		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5444 		      intel_dp->panel_power_cycle_delay);
5445 
5446 	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5447 		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5448 }
5449 
5450 static void
5451 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5452 					      struct intel_dp *intel_dp)
5453 {
5454 	struct drm_i915_private *dev_priv = dev->dev_private;
5455 	u32 pp_on, pp_off, pp_div, port_sel = 0;
5456 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5457 	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5458 	enum port port = dp_to_dig_port(intel_dp)->port;
5459 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
5460 
5461 	lockdep_assert_held(&dev_priv->pps_mutex);
5462 
5463 	if (IS_BROXTON(dev)) {
5464 		/*
5465 		 * TODO: BXT has 2 sets of PPS registers.
5466 	 * The correct register set for Broxton needs to be identified
5467 	 * using the VBT; hardcoding for now.
5468 		 */
5469 		pp_ctrl_reg = BXT_PP_CONTROL(0);
5470 		pp_on_reg = BXT_PP_ON_DELAYS(0);
5471 		pp_off_reg = BXT_PP_OFF_DELAYS(0);
5472 
5473 	} else if (HAS_PCH_SPLIT(dev)) {
5474 		pp_on_reg = PCH_PP_ON_DELAYS;
5475 		pp_off_reg = PCH_PP_OFF_DELAYS;
5476 		pp_div_reg = PCH_PP_DIVISOR;
5477 	} else {
5478 		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5479 
5480 		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5481 		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5482 		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5483 	}
5484 
5485 	/*
5486 	 * And finally store the new values in the power sequencer. The
5487 	 * backlight delays are set to 1 because we do manual waits on them. For
5488 	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5489 	 * we'll end up waiting for the backlight off delay twice: once when we
5490 	 * do the manual sleep, and once when we disable the panel and wait for
5491 	 * the PP_STATUS bit to become zero.
5492 	 */
5493 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5494 		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5495 	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5496 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5497 	/* Compute the divisor for the pp clock, simply matching the Bspec
5498 	 * formula. */
5499 	if (IS_BROXTON(dev)) {
5500 		pp_div = I915_READ(pp_ctrl_reg);
5501 		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5502 		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5503 				<< BXT_POWER_CYCLE_DELAY_SHIFT);
5504 	} else {
5505 		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5506 		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5507 				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
5508 	}
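	/*
	 * Worked example (illustrative t11_t12 of 6100 hw units, 610 ms):
	 * on non-BXT the cycle field becomes DIV_ROUND_UP(6100, 1000) = 7,
	 * i.e. the delay is rounded up to 700 ms; BXT adds 1 first because
	 * its field is zero based.
	 */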
5509 
5510 	/* Haswell doesn't have any port selection bits for the panel
5511 	 * power sequencer any more. */
5512 	if (IS_VALLEYVIEW(dev)) {
5513 		port_sel = PANEL_PORT_SELECT_VLV(port);
5514 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5515 		if (port == PORT_A)
5516 			port_sel = PANEL_PORT_SELECT_DPA;
5517 		else
5518 			port_sel = PANEL_PORT_SELECT_DPD;
5519 	}
5520 
5521 	pp_on |= port_sel;
5522 
5523 	I915_WRITE(pp_on_reg, pp_on);
5524 	I915_WRITE(pp_off_reg, pp_off);
5525 	if (IS_BROXTON(dev))
5526 		I915_WRITE(pp_ctrl_reg, pp_div);
5527 	else
5528 		I915_WRITE(pp_div_reg, pp_div);
5529 
5530 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5531 		      I915_READ(pp_on_reg),
5532 		      I915_READ(pp_off_reg),
5533 		      IS_BROXTON(dev) ?
5534 		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5535 		      I915_READ(pp_div_reg));
5536 }
5537 
5538 /**
5539  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5540  * @dev: DRM device
5541  * @refresh_rate: RR to be programmed
5542  *
5543  * This function gets called when refresh rate (RR) has to be changed from
5544  * one frequency to another. Switches can be between high and low RR
5545  * supported by the panel or to any other RR based on media playback (in
5546  * this case, RR value needs to be passed from user space).
5547  *
5548  * The caller of this function needs to take a lock on dev_priv->drrs.
5549  */
5550 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5551 {
5552 	struct drm_i915_private *dev_priv = dev->dev_private;
5553 	struct intel_encoder *encoder;
5554 	struct intel_digital_port *dig_port = NULL;
5555 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5556 	struct intel_crtc_state *config = NULL;
5557 	struct intel_crtc *intel_crtc = NULL;
5558 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5559 
5560 	if (refresh_rate <= 0) {
5561 		DRM_DEBUG_KMS("Refresh rate should be positive.\n");
5562 		return;
5563 	}
5564 
5565 	if (intel_dp == NULL) {
5566 		DRM_DEBUG_KMS("DRRS not supported.\n");
5567 		return;
5568 	}
5569 
5570 	/*
5571 	 * FIXME: This needs proper synchronization with psr state for some
5572 	 * platforms that cannot have PSR and DRRS enabled at the same time.
5573 	 */
5574 
5575 	dig_port = dp_to_dig_port(intel_dp);
5576 	encoder = &dig_port->base;
5577 	intel_crtc = to_intel_crtc(encoder->base.crtc);
5578 
5579 	if (!intel_crtc) {
5580 		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5581 		return;
5582 	}
5583 
5584 	config = intel_crtc->config;
5585 
5586 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5587 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5588 		return;
5589 	}
5590 
5591 	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5592 			refresh_rate)
5593 		index = DRRS_LOW_RR;
5594 
5595 	if (index == dev_priv->drrs.refresh_rate_type) {
5596 		DRM_DEBUG_KMS(
5597 			"DRRS requested for previously set RR...ignoring\n");
5598 		return;
5599 	}
5600 
5601 	if (!intel_crtc->active) {
5602 		DRM_DEBUG_KMS("eDP encoder disabled; CRTC not active\n");
5603 		return;
5604 	}
5605 
5606 	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5607 		switch (index) {
5608 		case DRRS_HIGH_RR:
5609 			intel_dp_set_m_n(intel_crtc, M1_N1);
5610 			break;
5611 		case DRRS_LOW_RR:
5612 			intel_dp_set_m_n(intel_crtc, M2_N2);
5613 			break;
5614 		case DRRS_MAX_RR:
5615 		default:
5616 			DRM_ERROR("Unsupported refresh rate type\n");
5617 		}
5618 	} else if (INTEL_INFO(dev)->gen > 6) {
5619 		u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5620 		u32 val;
5621 
5622 		val = I915_READ(reg);
5623 		if (index > DRRS_HIGH_RR) {
5624 			if (IS_VALLEYVIEW(dev))
5625 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5626 			else
5627 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5628 		} else {
5629 			if (IS_VALLEYVIEW(dev))
5630 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5631 			else
5632 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5633 		}
5634 		I915_WRITE(reg, val);
5635 	}
5636 
5637 	dev_priv->drrs.refresh_rate_type = index;
5638 
5639 	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5640 }
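
/*
 * A minimal caller sketch (mirroring intel_edp_drrs_disable() below);
 * per the kernel-doc above, dev_priv->drrs.mutex must be held:
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev_priv->dev,
 *			intel_dp->attached_connector->panel.fixed_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */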
5641 
5642 /**
5643  * intel_edp_drrs_enable - init drrs struct if supported
5644  * @intel_dp: DP struct
5645  *
5646  * Initializes frontbuffer_bits and drrs.dp
5647  */
5648 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5649 {
5650 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5651 	struct drm_i915_private *dev_priv = dev->dev_private;
5652 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5653 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5654 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5655 
5656 	if (!intel_crtc->config->has_drrs) {
5657 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5658 		return;
5659 	}
5660 
5661 	mutex_lock(&dev_priv->drrs.mutex);
5662 	if (WARN_ON(dev_priv->drrs.dp)) {
5663 		DRM_ERROR("DRRS already enabled\n");
5664 		goto unlock;
5665 	}
5666 
5667 	dev_priv->drrs.busy_frontbuffer_bits = 0;
5668 
5669 	dev_priv->drrs.dp = intel_dp;
5670 
5671 unlock:
5672 	mutex_unlock(&dev_priv->drrs.mutex);
5673 }
5674 
5675 /**
5676  * intel_edp_drrs_disable - Disable DRRS
5677  * @intel_dp: DP struct
5678  *
5679  */
5680 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5681 {
5682 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5683 	struct drm_i915_private *dev_priv = dev->dev_private;
5684 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5685 	struct drm_crtc *crtc = dig_port->base.base.crtc;
5686 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5687 
5688 	if (!intel_crtc->config->has_drrs)
5689 		return;
5690 
5691 	mutex_lock(&dev_priv->drrs.mutex);
5692 	if (!dev_priv->drrs.dp) {
5693 		mutex_unlock(&dev_priv->drrs.mutex);
5694 		return;
5695 	}
5696 
5697 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5698 		intel_dp_set_drrs_state(dev_priv->dev,
5699 			intel_dp->attached_connector->panel.
5700 			fixed_mode->vrefresh);
5701 
5702 	dev_priv->drrs.dp = NULL;
5703 	mutex_unlock(&dev_priv->drrs.mutex);
5704 
5705 	cancel_delayed_work_sync(&dev_priv->drrs.work);
5706 }
5707 
5708 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5709 {
5710 	struct drm_i915_private *dev_priv =
5711 		container_of(work, typeof(*dev_priv), drrs.work.work);
5712 	struct intel_dp *intel_dp;
5713 
5714 	mutex_lock(&dev_priv->drrs.mutex);
5715 
5716 	intel_dp = dev_priv->drrs.dp;
5717 
5718 	if (!intel_dp)
5719 		goto unlock;
5720 
5721 	/*
5722 	 * The delayed work can race with an invalidate hence we need to
5723 	 * recheck.
5724 	 */
5725 
5726 	if (dev_priv->drrs.busy_frontbuffer_bits)
5727 		goto unlock;
5728 
5729 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5730 		intel_dp_set_drrs_state(dev_priv->dev,
5731 			intel_dp->attached_connector->panel.
5732 			downclock_mode->vrefresh);
5733 
5734 unlock:
5735 	mutex_unlock(&dev_priv->drrs.mutex);
5736 }
5737 
5738 /**
5739  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5740  * @dev: DRM device
5741  * @frontbuffer_bits: frontbuffer plane tracking bits
5742  *
5743  * This function gets called every time rendering on the given planes starts.
5744  * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
5745  *
5746  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5747  */
5748 void intel_edp_drrs_invalidate(struct drm_device *dev,
5749 		unsigned frontbuffer_bits)
5750 {
5751 	struct drm_i915_private *dev_priv = dev->dev_private;
5752 	struct drm_crtc *crtc;
5753 	enum i915_pipe pipe;
5754 
5755 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5756 		return;
5757 
5758 	cancel_delayed_work(&dev_priv->drrs.work);
5759 
5760 	mutex_lock(&dev_priv->drrs.mutex);
5761 	if (!dev_priv->drrs.dp) {
5762 		mutex_unlock(&dev_priv->drrs.mutex);
5763 		return;
5764 	}
5765 
5766 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5767 	pipe = to_intel_crtc(crtc)->pipe;
5768 
5769 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5770 	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5771 
5772 	/* invalidate means busy screen hence upclock */
5773 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5774 		intel_dp_set_drrs_state(dev_priv->dev,
5775 				dev_priv->drrs.dp->attached_connector->panel.
5776 				fixed_mode->vrefresh);
5777 
5778 	mutex_unlock(&dev_priv->drrs.mutex);
5779 }
5780 
5781 /**
5782  * intel_edp_drrs_flush - Restart Idleness DRRS
5783  * @dev: DRM device
5784  * @frontbuffer_bits: frontbuffer plane tracking bits
5785  *
5786  * This function gets called every time rendering on the given planes has
5787  * completed or a flip on a crtc is completed. DRRS should then be upclocked
5788  * (LOW_RR -> HIGH_RR), and idleness detection should be restarted if no
5789  * other planes are dirty.
5790  *
5791  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5792  */
5793 void intel_edp_drrs_flush(struct drm_device *dev,
5794 		unsigned frontbuffer_bits)
5795 {
5796 	struct drm_i915_private *dev_priv = dev->dev_private;
5797 	struct drm_crtc *crtc;
5798 	enum i915_pipe pipe;
5799 
5800 	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5801 		return;
5802 
5803 	cancel_delayed_work(&dev_priv->drrs.work);
5804 
5805 	mutex_lock(&dev_priv->drrs.mutex);
5806 	if (!dev_priv->drrs.dp) {
5807 		mutex_unlock(&dev_priv->drrs.mutex);
5808 		return;
5809 	}
5810 
5811 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5812 	pipe = to_intel_crtc(crtc)->pipe;
5813 
5814 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5815 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5816 
5817 	/* flush means busy screen hence upclock */
5818 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5819 		intel_dp_set_drrs_state(dev_priv->dev,
5820 				dev_priv->drrs.dp->attached_connector->panel.
5821 				fixed_mode->vrefresh);
5822 
5823 	/*
5824 	 * flush also means no more activity hence schedule downclock, if all
5825 	 * other fbs are quiescent too
5826 	 */
5827 	if (!dev_priv->drrs.busy_frontbuffer_bits)
5828 		schedule_delayed_work(&dev_priv->drrs.work,
5829 				msecs_to_jiffies(1000));
5830 	mutex_unlock(&dev_priv->drrs.mutex);
5831 }
5832 
5833 /**
5834  * DOC: Display Refresh Rate Switching (DRRS)
5835  *
5836  * Display Refresh Rate Switching (DRRS) is a power conservation feature
5837  * which enables switching between low and high refresh rates,
5838  * dynamically, based on the usage scenario. This feature is applicable
5839  * for internal panels.
5840  *
5841  * Indication that the panel supports DRRS is given by the panel EDID, which
5842  * would list multiple refresh rates for one resolution.
5843  *
5844  * DRRS is of 2 types - static and seamless.
5845  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5846  * (may appear as a blink on screen) and is used in dock-undock scenarios.
5847  * Seamless DRRS involves changing RR without any visual effect to the user
5848  * and can be used during normal system usage. This is done by programming
5849  * certain registers.
5850  *
5851  * Support for static/seamless DRRS may be indicated in the VBT based on
5852  * inputs from the panel spec.
5853  *
5854  * DRRS saves power by switching to low RR based on usage scenarios.
5855  *
5856  * eDP DRRS:
5857  *        The implementation is based on the frontbuffer tracking infrastructure.
5858  * When there is a disturbance on the screen triggered by user activity or a
5859  * periodic system activity, DRRS is disabled (RR is changed to high RR).
5860  * When there is no movement on screen, after a timeout of 1 second, a switch
5861  * to low RR is made.
5862  *        For integration with frontbuffer tracking code,
5863  * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5864  *
5865  * DRRS can be further extended to support other internal panels and also
5866  * the scenario of video playback wherein RR is set based on the rate
5867  * requested by userspace.
5868  */
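
/*
 * A hedged sketch of the frontbuffer-tracking integration described above
 * (the actual call sites live in the frontbuffer tracking code; the shape
 * below is for illustration only):
 *
 *	// rendering is about to dirty a plane: force the high RR
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	...
 *	// rendering or the flip completed: restart the 1s idleness timer
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 */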
5869 
5870 /**
5871  * intel_dp_drrs_init - Init basic DRRS work and mutex.
5872  * @intel_connector: eDP connector
5873  * @fixed_mode: preferred mode of panel
5874  *
5875  * This function is called only once at driver load to initialize the
5876  * basic DRRS state.
5877  *
5878  * Returns:
5879  * Downclock mode if panel supports it, else return NULL.
5880  * DRRS support is determined by the presence of downclock mode (apart
5881  * from VBT setting).
5882  */
5883 static struct drm_display_mode *
5884 intel_dp_drrs_init(struct intel_connector *intel_connector,
5885 		struct drm_display_mode *fixed_mode)
5886 {
5887 	struct drm_connector *connector = &intel_connector->base;
5888 	struct drm_device *dev = connector->dev;
5889 	struct drm_i915_private *dev_priv = dev->dev_private;
5890 	struct drm_display_mode *downclock_mode = NULL;
5891 
5892 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5893 	lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5894 
5895 	if (INTEL_INFO(dev)->gen <= 6) {
5896 		DRM_DEBUG_KMS("DRRS is supported only for Gen7 and above\n");
5897 		return NULL;
5898 	}
5899 
5900 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5901 		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5902 		return NULL;
5903 	}
5904 
5905 	downclock_mode = intel_find_panel_downclock
5906 					(dev, fixed_mode, connector);
5907 
5908 	if (!downclock_mode) {
5909 		DRM_DEBUG_KMS("Downclock mode not found; DRRS not supported\n");
5910 		return NULL;
5911 	}
5912 
5913 	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5914 
5915 	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5916 	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5917 	return downclock_mode;
5918 }
5919 
5920 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5921 				     struct intel_connector *intel_connector)
5922 {
5923 	struct drm_connector *connector = &intel_connector->base;
5924 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5925 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5926 	struct drm_device *dev = intel_encoder->base.dev;
5927 	struct drm_i915_private *dev_priv = dev->dev_private;
5928 	struct drm_display_mode *fixed_mode = NULL;
5929 	struct drm_display_mode *downclock_mode = NULL;
5930 	bool has_dpcd;
5931 	struct drm_display_mode *scan;
5932 	struct edid *edid;
5933 	enum i915_pipe pipe = INVALID_PIPE;
5934 
5935 	if (!is_edp(intel_dp))
5936 		return true;
5937 
5938 	pps_lock(intel_dp);
5939 	intel_edp_panel_vdd_sanitize(intel_dp);
5940 	pps_unlock(intel_dp);
5941 
5942 	/* Cache DPCD and EDID for edp. */
5943 	has_dpcd = intel_dp_get_dpcd(intel_dp);
5944 
5945 	if (has_dpcd) {
5946 		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5947 			dev_priv->no_aux_handshake =
5948 				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5949 				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5950 	} else {
5951 		/* if this fails, presume the device is a ghost */
5952 		DRM_INFO("failed to retrieve link info, disabling eDP\n");
5953 		return false;
5954 	}
5955 
5956 	/* We now know it's not a ghost, init power sequence regs. */
5957 	pps_lock(intel_dp);
5958 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5959 	pps_unlock(intel_dp);
5960 
5961 	mutex_lock(&dev->mode_config.mutex);
5962 	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5963 	if (edid) {
5964 		if (drm_add_edid_modes(connector, edid)) {
5965 			drm_mode_connector_update_edid_property(connector,
5966 								edid);
5967 			drm_edid_to_eld(connector, edid);
5968 		} else {
5969 			kfree(edid);
5970 			edid = ERR_PTR(-EINVAL);
5971 		}
5972 	} else {
5973 		edid = ERR_PTR(-ENOENT);
5974 	}
5975 	intel_connector->edid = edid;
5976 
5977 	/* prefer fixed mode from EDID if available */
5978 	list_for_each_entry(scan, &connector->probed_modes, head) {
5979 		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5980 			fixed_mode = drm_mode_duplicate(dev, scan);
5981 			downclock_mode = intel_dp_drrs_init(
5982 						intel_connector, fixed_mode);
5983 			break;
5984 		}
5985 	}
5986 
5987 	/* fallback to VBT if available for eDP */
5988 	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5989 		fixed_mode = drm_mode_duplicate(dev,
5990 					dev_priv->vbt.lfp_lvds_vbt_mode);
5991 		if (fixed_mode)
5992 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5993 	}
5994 	mutex_unlock(&dev->mode_config.mutex);
5995 
5996 	if (IS_VALLEYVIEW(dev)) {
5997 #if 0
5998 		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5999 		register_reboot_notifier(&intel_dp->edp_notifier);
6000 #endif
6001 
6002 		/*
6003 		 * Figure out the current pipe for the initial backlight setup.
6004 		 * If the current pipe isn't valid, try the PPS pipe, and if that
6005 		 * fails just assume pipe A.
6006 		 */
6007 		if (IS_CHERRYVIEW(dev))
6008 			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
6009 		else
6010 			pipe = PORT_TO_PIPE(intel_dp->DP);
6011 
6012 		if (pipe != PIPE_A && pipe != PIPE_B)
6013 			pipe = intel_dp->pps_pipe;
6014 
6015 		if (pipe != PIPE_A && pipe != PIPE_B)
6016 			pipe = PIPE_A;
6017 
6018 		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
6019 			      pipe_name(pipe));
6020 	}
6021 
6022 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
6023 	intel_connector->panel.backlight.power = intel_edp_backlight_power;
6024 	intel_panel_setup_backlight(connector, pipe);
6025 
6026 	return true;
6027 }
6028 
6029 bool
6030 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6031 			struct intel_connector *intel_connector)
6032 {
6033 	struct drm_connector *connector = &intel_connector->base;
6034 	struct intel_dp *intel_dp = &intel_dig_port->dp;
6035 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
6036 	struct drm_device *dev = intel_encoder->base.dev;
6037 	struct drm_i915_private *dev_priv = dev->dev_private;
6038 	enum port port = intel_dig_port->port;
6039 	int type;
6040 
6041 	intel_dp->pps_pipe = INVALID_PIPE;
6042 
6043 	/* intel_dp vfuncs */
6044 	if (INTEL_INFO(dev)->gen >= 9)
6045 		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
6046 	else if (IS_VALLEYVIEW(dev))
6047 		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
6048 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
6049 		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
6050 	else if (HAS_PCH_SPLIT(dev))
6051 		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
6052 	else
6053 		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
6054 
6055 	if (INTEL_INFO(dev)->gen >= 9)
6056 		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
6057 	else
6058 		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
6059 
6060 	/* Preserve the current hw state. */
6061 	intel_dp->DP = I915_READ(intel_dp->output_reg);
6062 	intel_dp->attached_connector = intel_connector;
6063 
6064 	if (intel_dp_is_edp(dev, port))
6065 		type = DRM_MODE_CONNECTOR_eDP;
6066 	else
6067 		type = DRM_MODE_CONNECTOR_DisplayPort;
6068 
6069 	/*
6070 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6071 	 * for DP the encoder type can be set by the caller to
6072 	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6073 	 */
6074 	if (type == DRM_MODE_CONNECTOR_eDP)
6075 		intel_encoder->type = INTEL_OUTPUT_EDP;
6076 
6077 	/* eDP only on port B and/or C on vlv/chv */
6078 	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6079 		    port != PORT_B && port != PORT_C))
6080 		return false;
6081 
6082 	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6083 			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6084 			port_name(port));
6085 
6086 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6087 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6088 
6089 	connector->interlace_allowed = true;
6090 	connector->doublescan_allowed = 0;
6091 
6092 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
6093 			  edp_panel_vdd_work);
6094 
6095 	intel_connector_attach_encoder(intel_connector, intel_encoder);
6096 	drm_connector_register(connector);
6097 
6098 	if (HAS_DDI(dev))
6099 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6100 	else
6101 		intel_connector->get_hw_state = intel_connector_get_hw_state;
6102 	intel_connector->unregister = intel_dp_connector_unregister;
6103 
6104 	/* Set up the hotplug pin. */
6105 	switch (port) {
6106 	case PORT_A:
6107 		intel_encoder->hpd_pin = HPD_PORT_A;
6108 		break;
6109 	case PORT_B:
6110 		intel_encoder->hpd_pin = HPD_PORT_B;
6111 		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6112 			intel_encoder->hpd_pin = HPD_PORT_A;
6113 		break;
6114 	case PORT_C:
6115 		intel_encoder->hpd_pin = HPD_PORT_C;
6116 		break;
6117 	case PORT_D:
6118 		intel_encoder->hpd_pin = HPD_PORT_D;
6119 		break;
6120 	case PORT_E:
6121 		intel_encoder->hpd_pin = HPD_PORT_E;
6122 		break;
6123 	default:
6124 		BUG();
6125 	}
6126 
6127 	if (is_edp(intel_dp)) {
6128 		pps_lock(intel_dp);
6129 		intel_dp_init_panel_power_timestamps(intel_dp);
6130 		if (IS_VALLEYVIEW(dev))
6131 			vlv_initial_power_sequencer_setup(intel_dp);
6132 		else
6133 			intel_dp_init_panel_power_sequencer(dev, intel_dp);
6134 		pps_unlock(intel_dp);
6135 	}
6136 
6137 	intel_dp_aux_init(intel_dp, intel_connector);
6138 
6139 	/* init MST on ports that can support it */
6140 	if (HAS_DP_MST(dev) &&
6141 	    (port == PORT_B || port == PORT_C || port == PORT_D))
6142 		intel_dp_mst_encoder_init(intel_dig_port,
6143 					  intel_connector->base.base.id);
6144 
6145 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6146 		drm_dp_aux_unregister(&intel_dp->aux);
6147 		if (is_edp(intel_dp)) {
6148 			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6149 			/*
6150 			 * vdd might still be enabled due to the delayed vdd off.
6151 			 * Make sure vdd is actually turned off here.
6152 			 */
6153 			pps_lock(intel_dp);
6154 			edp_panel_vdd_off_sync(intel_dp);
6155 			pps_unlock(intel_dp);
6156 		}
6157 		drm_connector_unregister(connector);
6158 		drm_connector_cleanup(connector);
6159 		return false;
6160 	}
6161 
6162 	intel_dp_add_properties(intel_dp, connector);
6163 
6164 	/* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
6165 	 * written with 0xd.  Failure to do so will result in spurious
6166 	 * interrupts being generated on the port when a cable is not attached.
6167 	 */
6168 	if (IS_G4X(dev) && !IS_GM45(dev)) {
6169 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6170 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6171 	}
6172 
6173 	i915_debugfs_connector_add(connector);
6174 
6175 	return true;
6176 }
6177 
6178 void
6179 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6180 {
6181 	struct drm_i915_private *dev_priv = dev->dev_private;
6182 	struct intel_digital_port *intel_dig_port;
6183 	struct intel_encoder *intel_encoder;
6184 	struct drm_encoder *encoder;
6185 	struct intel_connector *intel_connector;
6186 
6187 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6188 	if (!intel_dig_port)
6189 		return;
6190 
6191 	intel_connector = intel_connector_alloc();
6192 	if (!intel_connector)
6193 		goto err_connector_alloc;
6194 
6195 	intel_encoder = &intel_dig_port->base;
6196 	encoder = &intel_encoder->base;
6197 
6198 	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6199 			 DRM_MODE_ENCODER_TMDS);
6200 
6201 	intel_encoder->compute_config = intel_dp_compute_config;
6202 	intel_encoder->disable = intel_disable_dp;
6203 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
6204 	intel_encoder->get_config = intel_dp_get_config;
6205 	intel_encoder->suspend = intel_dp_encoder_suspend;
6206 	if (IS_CHERRYVIEW(dev)) {
6207 		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6208 		intel_encoder->pre_enable = chv_pre_enable_dp;
6209 		intel_encoder->enable = vlv_enable_dp;
6210 		intel_encoder->post_disable = chv_post_disable_dp;
6211 		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6212 	} else if (IS_VALLEYVIEW(dev)) {
6213 		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6214 		intel_encoder->pre_enable = vlv_pre_enable_dp;
6215 		intel_encoder->enable = vlv_enable_dp;
6216 		intel_encoder->post_disable = vlv_post_disable_dp;
6217 	} else {
6218 		intel_encoder->pre_enable = g4x_pre_enable_dp;
6219 		intel_encoder->enable = g4x_enable_dp;
6220 		if (INTEL_INFO(dev)->gen >= 5)
6221 			intel_encoder->post_disable = ilk_post_disable_dp;
6222 	}
6223 
6224 	intel_dig_port->port = port;
6225 	intel_dig_port->dp.output_reg = output_reg;
6226 
6227 	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6228 	if (IS_CHERRYVIEW(dev)) {
6229 		if (port == PORT_D)
6230 			intel_encoder->crtc_mask = 1 << 2;
6231 		else
6232 			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6233 	} else {
6234 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6235 	}
6236 	intel_encoder->cloneable = 0;
6237 
6238 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6239 	dev_priv->hotplug.irq_port[port] = intel_dig_port;
6240 
6241 	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6242 		goto err_init_connector;
6243 
6244 	return;
6245 
6246 err_init_connector:
6247 	drm_encoder_cleanup(encoder);
6248 	kfree(intel_connector);
6249 err_connector_alloc:
6250 	kfree(intel_dig_port);
6251 
6252 	return;
6253 }
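
/*
 * A hedged usage sketch: platform output setup is expected to call this
 * once per DP port with that port's output register (the DP_B/PORT_B
 * pairing below is illustrative):
 *
 *	intel_dp_init(dev, DP_B, PORT_B);
 */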
6254 
6255 #if 0
6256 void intel_dp_mst_suspend(struct drm_device *dev)
6257 {
6258 	struct drm_i915_private *dev_priv = dev->dev_private;
6259 	int i;
6260 
6261 	/* disable MST */
6262 	for (i = 0; i < I915_MAX_PORTS; i++) {
6263 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6264 		if (!intel_dig_port)
6265 			continue;
6266 
6267 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6268 			if (!intel_dig_port->dp.can_mst)
6269 				continue;
6270 			if (intel_dig_port->dp.is_mst)
6271 				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6272 		}
6273 	}
6274 }
6275 #endif
6276 
6277 void intel_dp_mst_resume(struct drm_device *dev)
6278 {
6279 	struct drm_i915_private *dev_priv = dev->dev_private;
6280 	int i;
6281 
6282 	for (i = 0; i < I915_MAX_PORTS; i++) {
6283 		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6284 		if (!intel_dig_port)
6285 			continue;
6286 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6287 #if 0
6288 			int ret;
6289 
6290 			if (!intel_dig_port->dp.can_mst)
6291 				continue;
6292 
6293 			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6294 			if (ret != 0) {
6295 				intel_dp_check_mst_status(&intel_dig_port->dp);
6296 			}
6297 #endif
6298 		}
6299 	}
6300 }
6301